From 9d750ef7efa9da22e263ca319e392c9340a72c21 Mon Sep 17 00:00:00 2001 From: zheng-huanhuan Date: Fri, 27 Mar 2020 16:39:02 +0800 Subject: [PATCH] initial version --- .gitee/PULL_REQUEST_TEMPLATE.md | 25 + .gitignore | 30 ++ LICENSE | 201 +++++++ NOTICE | 2 + README.md | 74 +++ RELEASE.md | 11 + docs/README.md | 3 + example/data_processing.py | 62 +++ example/mnist_demo/README.md | 46 ++ example/mnist_demo/lenet5_net.py | 64 +++ example/mnist_demo/mnist_attack_cw.py | 118 ++++ example/mnist_demo/mnist_attack_deepfool.py | 120 +++++ example/mnist_demo/mnist_attack_fgsm.py | 119 ++++ example/mnist_demo/mnist_attack_genetic.py | 138 +++++ example/mnist_demo/mnist_attack_hsja.py | 150 ++++++ example/mnist_demo/mnist_attack_jsma.py | 124 +++++ example/mnist_demo/mnist_attack_lbfgs.py | 132 +++++ example/mnist_demo/mnist_attack_nes.py | 168 ++++++ example/mnist_demo/mnist_attack_pgd.py | 119 ++++ example/mnist_demo/mnist_attack_pointwise.py | 138 +++++ example/mnist_demo/mnist_attack_pso.py | 131 +++++ .../mnist_attack_salt_and_pepper.py | 142 +++++ example/mnist_demo/mnist_defense_nad.py | 144 +++++ example/mnist_demo/mnist_evaluation.py | 326 +++++++++++ .../mnist_demo/mnist_similarity_detector.py | 182 +++++++ example/mnist_demo/mnist_train.py | 88 +++ mindarmour/__init__.py | 13 + mindarmour/attacks/__init__.py | 39 ++ mindarmour/attacks/attack.py | 97 ++++ mindarmour/attacks/black/__init__.py | 0 mindarmour/attacks/black/black_model.py | 75 +++ mindarmour/attacks/black/genetic_attack.py | 230 ++++++++ .../attacks/black/hop_skip_jump_attack.py | 510 ++++++++++++++++++ .../black/natural_evolutionary_strategy.py | 432 +++++++++++++++ mindarmour/attacks/black/pointwise_attack.py | 326 +++++++++++ mindarmour/attacks/black/pso_attack.py | 302 +++++++++++ .../attacks/black/salt_and_pepper_attack.py | 166 ++++++ mindarmour/attacks/carlini_wagner.py | 419 ++++++++++++++ mindarmour/attacks/deep_fool.py | 154 ++++++ mindarmour/attacks/gradient_method.py | 402 ++++++++++++++ .../attacks/iterative_gradient_method.py | 432 +++++++++++++++ mindarmour/attacks/jsma.py | 196 +++++++ mindarmour/attacks/lbfgs.py | 224 ++++++++ mindarmour/defenses/__init__.py | 15 + mindarmour/defenses/adversarial_defense.py | 169 ++++++ mindarmour/defenses/defense.py | 86 +++ .../defenses/natural_adversarial_defense.py | 56 ++ .../defenses/projected_adversarial_defense.py | 69 +++ mindarmour/detectors/__init__.py | 18 + mindarmour/detectors/black/__init__.py | 0 .../detectors/black/similarity_detector.py | 284 ++++++++++ mindarmour/detectors/detector.py | 101 ++++ mindarmour/detectors/ensemble_detector.py | 126 +++++ mindarmour/detectors/mag_net.py | 228 ++++++++ mindarmour/detectors/region_based_detector.py | 235 ++++++++ mindarmour/detectors/spatial_smoothing.py | 171 ++++++ mindarmour/evaluations/__init__.py | 14 + mindarmour/evaluations/attack_evaluation.py | 275 ++++++++++ mindarmour/evaluations/black/__init__.py | 0 .../evaluations/black/defense_evaluation.py | 204 +++++++ mindarmour/evaluations/defense_evaluation.py | 152 ++++++ mindarmour/evaluations/visual_metrics.py | 141 +++++ mindarmour/utils/__init__.py | 7 + mindarmour/utils/_check_param.py | 269 +++++++++ mindarmour/utils/logger.py | 154 ++++++ mindarmour/utils/util.py | 147 +++++ package.sh | 38 ++ requirements.txt | 7 + setup.py | 102 ++++ tests/st/resnet50/resnet_cifar10.py | 311 +++++++++++ tests/st/resnet50/test_cifar10_attack_fgsm.py | 76 +++ .../attacks/black/test_genetic_attack.py | 144 +++++ tests/ut/python/attacks/black/test_hsja.py | 166 ++++++ 
tests/ut/python/attacks/black/test_nes.py | 217 ++++++++ .../attacks/black/test_pointwise_attack.py | 90 ++++ .../python/attacks/black/test_pso_attack.py | 166 ++++++ .../black/test_salt_and_pepper_attack.py | 123 +++++ .../attacks/test_batch_generate_attack.py | 74 +++ tests/ut/python/attacks/test_cw.py | 90 ++++ tests/ut/python/attacks/test_deep_fool.py | 119 ++++ .../ut/python/attacks/test_gradient_method.py | 242 +++++++++ .../attacks/test_iterative_gradient_method.py | 136 +++++ tests/ut/python/attacks/test_jsma.py | 161 ++++++ tests/ut/python/attacks/test_lbfgs.py | 72 +++ tests/ut/python/defenses/mock_net.py | 107 ++++ tests/ut/python/defenses/test_ad.py | 66 +++ tests/ut/python/defenses/test_ead.py | 70 +++ tests/ut/python/defenses/test_nad.py | 65 +++ tests/ut/python/defenses/test_pad.py | 66 +++ .../black/test_similarity_detector.py | 101 ++++ .../detectors/test_ensemble_detector.py | 112 ++++ tests/ut/python/detectors/test_mag_net.py | 164 ++++++ .../detectors/test_region_based_detector.py | 115 ++++ .../detectors/test_spatial_smoothing.py | 116 ++++ .../black/test_black_defense_eval.py | 73 +++ .../ut/python/evaluations/test_attack_eval.py | 95 ++++ .../python/evaluations/test_defense_eval.py | 51 ++ .../python/evaluations/test_radar_metric.py | 57 ++ .../checkpoint_lenet-10_1875.ckpt | Bin 0 -> 494163 bytes tests/ut/python/utils/test_log_util.py | 64 +++ 100 files changed, 13451 insertions(+) create mode 100644 .gitee/PULL_REQUEST_TEMPLATE.md create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 NOTICE create mode 100644 README.md create mode 100644 RELEASE.md create mode 100644 docs/README.md create mode 100644 example/data_processing.py create mode 100644 example/mnist_demo/README.md create mode 100644 example/mnist_demo/lenet5_net.py create mode 100644 example/mnist_demo/mnist_attack_cw.py create mode 100644 example/mnist_demo/mnist_attack_deepfool.py create mode 100644 example/mnist_demo/mnist_attack_fgsm.py create mode 100644 example/mnist_demo/mnist_attack_genetic.py create mode 100644 example/mnist_demo/mnist_attack_hsja.py create mode 100644 example/mnist_demo/mnist_attack_jsma.py create mode 100644 example/mnist_demo/mnist_attack_lbfgs.py create mode 100644 example/mnist_demo/mnist_attack_nes.py create mode 100644 example/mnist_demo/mnist_attack_pgd.py create mode 100644 example/mnist_demo/mnist_attack_pointwise.py create mode 100644 example/mnist_demo/mnist_attack_pso.py create mode 100644 example/mnist_demo/mnist_attack_salt_and_pepper.py create mode 100644 example/mnist_demo/mnist_defense_nad.py create mode 100644 example/mnist_demo/mnist_evaluation.py create mode 100644 example/mnist_demo/mnist_similarity_detector.py create mode 100644 example/mnist_demo/mnist_train.py create mode 100644 mindarmour/__init__.py create mode 100644 mindarmour/attacks/__init__.py create mode 100644 mindarmour/attacks/attack.py create mode 100644 mindarmour/attacks/black/__init__.py create mode 100644 mindarmour/attacks/black/black_model.py create mode 100644 mindarmour/attacks/black/genetic_attack.py create mode 100644 mindarmour/attacks/black/hop_skip_jump_attack.py create mode 100644 mindarmour/attacks/black/natural_evolutionary_strategy.py create mode 100644 mindarmour/attacks/black/pointwise_attack.py create mode 100644 mindarmour/attacks/black/pso_attack.py create mode 100644 mindarmour/attacks/black/salt_and_pepper_attack.py create mode 100644 mindarmour/attacks/carlini_wagner.py create mode 100644 mindarmour/attacks/deep_fool.py create mode 100644 
mindarmour/attacks/gradient_method.py create mode 100644 mindarmour/attacks/iterative_gradient_method.py create mode 100644 mindarmour/attacks/jsma.py create mode 100644 mindarmour/attacks/lbfgs.py create mode 100644 mindarmour/defenses/__init__.py create mode 100644 mindarmour/defenses/adversarial_defense.py create mode 100644 mindarmour/defenses/defense.py create mode 100644 mindarmour/defenses/natural_adversarial_defense.py create mode 100644 mindarmour/defenses/projected_adversarial_defense.py create mode 100644 mindarmour/detectors/__init__.py create mode 100644 mindarmour/detectors/black/__init__.py create mode 100644 mindarmour/detectors/black/similarity_detector.py create mode 100644 mindarmour/detectors/detector.py create mode 100644 mindarmour/detectors/ensemble_detector.py create mode 100644 mindarmour/detectors/mag_net.py create mode 100644 mindarmour/detectors/region_based_detector.py create mode 100644 mindarmour/detectors/spatial_smoothing.py create mode 100644 mindarmour/evaluations/__init__.py create mode 100644 mindarmour/evaluations/attack_evaluation.py create mode 100644 mindarmour/evaluations/black/__init__.py create mode 100644 mindarmour/evaluations/black/defense_evaluation.py create mode 100644 mindarmour/evaluations/defense_evaluation.py create mode 100644 mindarmour/evaluations/visual_metrics.py create mode 100644 mindarmour/utils/__init__.py create mode 100644 mindarmour/utils/_check_param.py create mode 100644 mindarmour/utils/logger.py create mode 100644 mindarmour/utils/util.py create mode 100644 package.sh create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 tests/st/resnet50/resnet_cifar10.py create mode 100644 tests/st/resnet50/test_cifar10_attack_fgsm.py create mode 100644 tests/ut/python/attacks/black/test_genetic_attack.py create mode 100644 tests/ut/python/attacks/black/test_hsja.py create mode 100644 tests/ut/python/attacks/black/test_nes.py create mode 100644 tests/ut/python/attacks/black/test_pointwise_attack.py create mode 100644 tests/ut/python/attacks/black/test_pso_attack.py create mode 100644 tests/ut/python/attacks/black/test_salt_and_pepper_attack.py create mode 100644 tests/ut/python/attacks/test_batch_generate_attack.py create mode 100644 tests/ut/python/attacks/test_cw.py create mode 100644 tests/ut/python/attacks/test_deep_fool.py create mode 100644 tests/ut/python/attacks/test_gradient_method.py create mode 100644 tests/ut/python/attacks/test_iterative_gradient_method.py create mode 100644 tests/ut/python/attacks/test_jsma.py create mode 100644 tests/ut/python/attacks/test_lbfgs.py create mode 100644 tests/ut/python/defenses/mock_net.py create mode 100644 tests/ut/python/defenses/test_ad.py create mode 100644 tests/ut/python/defenses/test_ead.py create mode 100644 tests/ut/python/defenses/test_nad.py create mode 100644 tests/ut/python/defenses/test_pad.py create mode 100644 tests/ut/python/detectors/black/test_similarity_detector.py create mode 100644 tests/ut/python/detectors/test_ensemble_detector.py create mode 100644 tests/ut/python/detectors/test_mag_net.py create mode 100644 tests/ut/python/detectors/test_region_based_detector.py create mode 100644 tests/ut/python/detectors/test_spatial_smoothing.py create mode 100644 tests/ut/python/evaluations/black/test_black_defense_eval.py create mode 100644 tests/ut/python/evaluations/test_attack_eval.py create mode 100644 tests/ut/python/evaluations/test_defense_eval.py create mode 100644 tests/ut/python/evaluations/test_radar_metric.py create mode 100644 
tests/ut/python/test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt create mode 100644 tests/ut/python/utils/test_log_util.py diff --git a/.gitee/PULL_REQUEST_TEMPLATE.md b/.gitee/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..eaab01d --- /dev/null +++ b/.gitee/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,25 @@ + + +**What type of PR is this?** +> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> /kind bug +> /kind task +> /kind feature + + +**What this PR does / why we need it**: + + +**Which issue(s) this PR fixes**: + +Fixes # + +**Special notes for your reviewer**: + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9f1685b --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +*.dot +*.ir +*.dat +*.pyc +*.npy +*.csv +*.gz +*.tar +*.zip +*.rar +*.png +*.ipynb +.idea/ +build/ +dist/ +local_script/ +example/dataset/ +example/mnist_demo/MNIST_unzip/ +example/mnist_demo/trained_ckpt_file/ +example/mnist_demo/model/ +example/cifar_demo/model/ +example/dog_cat_demo/model/ +mindarmour.egg-info/ +*model/ +*MNIST/ +*out.data/ +*defensed_model/ +*pre_trained_model/ +*__pycache__/ +*kernel_meta diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..2e97cce --- /dev/null +++ b/NOTICE @@ -0,0 +1,2 @@ +MindSpore MindArmour +Copyright 2019-2020 Huawei Technologies Co., Ltd \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..dbe4d34 --- /dev/null +++ b/README.md @@ -0,0 +1,74 @@ +# MindArmour + +- [What is MindArmour](#what-is-mindarmour) +- [Setting up](#setting-up-mindarmour) +- [Docs](#docs) +- [Community](#community) +- [Contributing](#contributing) +- [Release Notes](#release-notes) +- [License](#license) + +## What is MindArmour + +A tool box for MindSpore users to enhance model security and trustworthiness. + +MindArmour is designed for adversarial examples, including four submodule: adversarial examples generation, adversarial example detection, model defense and evaluation. 
The architecture is shown as follow: + +![mindarmour_architecture](docs/mindarmour_architecture.png) + +## Setting up MindArmour + +### Dependencies + +This library uses MindSpore to accelerate graph computations performed by many machine learning models. Therefore, installing MindSpore is a pre-requisite. All other dependencies are included in `setup.py`. + +### Installation + +#### Installation for development + +1. Download source code from Gitee. + +```bash +git clone https://gitee.com/mindspore/mindarmour.git +``` + +2. Compile and install in MindArmour directory. + +```bash +$ cd mindarmour +$ python setup.py install +``` + +#### `Pip` installation + +1. Download whl package from [MindSpore website](https://www.mindspore.cn/versions/en), then run the following command: + +``` +pip install mindarmour-{version}-cp37-cp37m-linux_{arch}.whl +``` + +2. Successfully installed, if there is no error message such as `No module named 'mindarmour'` when execute the following command: + +```bash +python -c 'import mindarmour' +``` + +## Docs + +Guidance on installation, tutorials, API, see our [User Documentation](https://gitee.com/mindspore/docs). + +## Community + +- [MindSpore Slack](https://join.slack.com/t/mindspore/shared_invite/enQtOTcwMTIxMDI3NjM0LTNkMWM2MzI5NjIyZWU5ZWQ5M2EwMTQ5MWNiYzMxOGM4OWFhZjI4M2E5OGI2YTg3ODU1ODE2Njg1MThiNWI3YmQ) - Ask questions and find answers. + +## Contributing + +Welcome contributions. See our [Contributor Wiki](https://gitee.com/mindspore/mindspore/blob/master/CONTRIBUTING.md) for more details. + +## Release Notes + +The release notes, see our [RELEASE](RELEASE.md). + +## License + +[Apache License 2.0](LICENSE) diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000..870fe6b --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,11 @@ +# Release 0.1.0-alpha + +Initial release of MindArmour. + +## Major Features + +- Support adversarial attack and defense on the platform of MindSpore. +- Include 13 white-box and 7 black-box attack methods. +- Provide 5 detection algorithms to detect attacking in multiple way. +- Provide adversarial training to enhance model security. +- Provide 6 evaluation metrics for attack methods and 9 evaluation metrics for defense methods. \ No newline at end of file diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..49e2fa8 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,3 @@ +# MindArmour Documentation + +The MindArmour documentation is in the [MindSpore Docs](https://gitee.com/mindspore/docs) repository. diff --git a/example/data_processing.py b/example/data_processing.py new file mode 100644 index 0000000..8b4007e --- /dev/null +++ b/example/data_processing.py @@ -0,0 +1,62 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
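The `generate_mnist_dataset` helper defined below in this file is the entry point every MNIST demo uses to build its input pipeline. A minimal consumption sketch, assuming the MNIST archives have been unpacked under `./MNIST_unzip/test` as described in `example/mnist_demo/README.md` (the path and batch contents printed here are assumptions, not part of the demo code):

```python
# Sketch: build the MNIST test pipeline and pull one batch.
import numpy as np

from data_processing import generate_mnist_dataset

mnist_ds = generate_mnist_dataset("./MNIST_unzip/test", batch_size=32, sparse=True)
for image, label in mnist_ds.create_tuple_iterator():
    # images are resized to 32x32 and transposed to CHW by the map operations below
    image = np.asarray(image, dtype=np.float32)
    print(image.shape, np.asarray(label).shape)   # (32, 1, 32, 32) (32,)
    break
```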
+import mindspore.dataset as ds +import mindspore.dataset.transforms.vision.c_transforms as CV +import mindspore.dataset.transforms.c_transforms as C +from mindspore.dataset.transforms.vision import Inter +import mindspore.common.dtype as mstype + + +def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1, + num_parallel_workers=1, sparse=True): + """ + create dataset for training or testing + """ + # define dataset + ds1 = ds.MnistDataset(data_path) + + # define operation parameters + resize_height, resize_width = 32, 32 + rescale = 1.0 / 255.0 + shift = 0.0 + + # define map operations + resize_op = CV.Resize((resize_height, resize_width), + interpolation=Inter.LINEAR) + rescale_op = CV.Rescale(rescale, shift) + hwc2chw_op = CV.HWC2CHW() + type_cast_op = C.TypeCast(mstype.int32) + one_hot_enco = C.OneHot(10) + + # apply map operations on images + if not sparse: + ds1 = ds1.map(input_columns="label", operations=one_hot_enco, + num_parallel_workers=num_parallel_workers) + type_cast_op = C.TypeCast(mstype.float32) + ds1 = ds1.map(input_columns="label", operations=type_cast_op, + num_parallel_workers=num_parallel_workers) + ds1 = ds1.map(input_columns="image", operations=resize_op, + num_parallel_workers=num_parallel_workers) + ds1 = ds1.map(input_columns="image", operations=rescale_op, + num_parallel_workers=num_parallel_workers) + ds1 = ds1.map(input_columns="image", operations=hwc2chw_op, + num_parallel_workers=num_parallel_workers) + + # apply DatasetOps + buffer_size = 10000 + ds1 = ds1.shuffle(buffer_size=buffer_size) + ds1 = ds1.batch(batch_size, drop_remainder=True) + ds1 = ds1.repeat(repeat_size) + + return ds1 diff --git a/example/mnist_demo/README.md b/example/mnist_demo/README.md new file mode 100644 index 0000000..e2e1cbc --- /dev/null +++ b/example/mnist_demo/README.md @@ -0,0 +1,46 @@ +# mnist demo +## Introduction + +The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from MNIST. The digits have been size-normalized and centered in a fixed-size image. + +## run demo + +### 1. download dataset +```sh +$ cd example/mnist_demo +$ mkdir MNIST_unzip +$ cd MNIST_unzip +$ mkdir train +$ mkdir test +$ cd train +$ wget "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz" +$ wget "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz" +$ gzip train-images-idx3-ubyte.gz -d +$ gzip train-labels-idx1-ubyte.gz -d +$ cd ../test +$ wget "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz" +$ wget "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz" +$ gzip t10k-images-idx3-ubyte.gz -d +$ gzip t10k-images-idx3-ubyte.gz -d +$ cd ../../ +``` + +### 1. trian model +```sh +$ python mnist_train.py + +``` + +### 2. run attack test +```sh +$ mkdir out.data +$ python mnist_attack_jsma.py + +``` + +### 3. run defense/detector test +```sh +$ python mnist_defense_nad.py +$ python mnist_similarity_detector.py + +``` diff --git a/example/mnist_demo/lenet5_net.py b/example/mnist_demo/lenet5_net.py new file mode 100644 index 0000000..0606015 --- /dev/null +++ b/example/mnist_demo/lenet5_net.py @@ -0,0 +1,64 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import mindspore.nn as nn +import mindspore.ops.operations as P +from mindspore.common.initializer import TruncatedNormal + + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode="valid") + + +def fc_with_initialize(input_channels, out_channels): + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +def weight_variable(): + return TruncatedNormal(0.2) + + +class LeNet5(nn.Cell): + """ + Lenet network + """ + def __init__(self): + super(LeNet5, self).__init__() + self.conv1 = conv(1, 6, 5) + self.conv2 = conv(6, 16, 5) + self.fc1 = fc_with_initialize(16*5*5, 120) + self.fc2 = fc_with_initialize(120, 84) + self.fc3 = fc_with_initialize(84, 10) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.reshape = P.Reshape() + + def construct(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv2(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.reshape(x, (-1, 16*5*5)) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x diff --git a/example/mnist_demo/mnist_attack_cw.py b/example/mnist_demo/mnist_attack_cw.py new file mode 100644 index 0000000..3fa614e --- /dev/null +++ b/example/mnist_demo/mnist_attack_cw.py @@ -0,0 +1,118 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
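This script, like the other attack demos in this directory, starts from the same boilerplate: restore the `LeNet5` defined in `lenet5_net.py` from a training checkpoint and wrap it in a MindSpore `Model`. A condensed sketch of that restore-and-predict step, assuming the checkpoint produced by `mnist_train.py` exists and an Ascend backend is available (both assumptions, as in the demos):

```python
# Sketch: load a trained LeNet5 checkpoint and classify one placeholder batch.
import numpy as np
from mindspore import Model, Tensor, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from lenet5_net import LeNet5

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")  # as in the demos

net = LeNet5()
load_param_into_net(net, load_checkpoint('./trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'))
model = Model(net)

batch = np.zeros((32, 1, 32, 32), dtype=np.float32)   # placeholder 32x32 grayscale images
logits = model.predict(Tensor(batch)).asnumpy()        # shape (32, 10)
print(np.argmax(logits, axis=1))                       # predicted digit per image
```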
+import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.carlini_wagner import CarliniWagnerL2Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'CW_Test' + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_carlini_wagner_attack(): + """ + CW-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = Model(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), + axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + # attacking + num_classes = 10 + attack = CarliniWagnerL2Attack(net, num_classes, targeted=False) + start_time = time.clock() + adv_data = attack.batch_generate(np.concatenate(test_images), + np.concatenate(test_labels), batch_size=32) + stop_time = time.clock() + pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy() + # rescale predict confidences into (0, 1). 
+ pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_labels_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", + accuracy_adv) + test_labels = np.eye(10)[np.concatenate(test_labels)] + attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), + test_labels, adv_data.transpose(0, 2, 3, 1), + pred_logits_adv) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_carlini_wagner_attack() diff --git a/example/mnist_demo/mnist_attack_deepfool.py b/example/mnist_demo/mnist_attack_deepfool.py new file mode 100644 index 0000000..925f50b --- /dev/null +++ b/example/mnist_demo/mnist_attack_deepfool.py @@ -0,0 +1,120 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
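The core of the Carlini-Wagner demo above reduces to constructing the attack and calling `batch_generate` on numpy inputs. A stripped-down sketch of that call; the untrained network, random placeholder data, and Ascend backend are assumptions used only to show the call shape:

```python
# Sketch: minimal CarliniWagnerL2Attack invocation on placeholder data.
import numpy as np
from mindspore import context
from mindarmour.attacks.carlini_wagner import CarliniWagnerL2Attack

from lenet5_net import LeNet5

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")  # as in the demos

net = LeNet5()                                    # load trained parameters in practice
attack = CarliniWagnerL2Attack(net, 10, targeted=False)

images = np.random.rand(8, 1, 32, 32).astype(np.float32)          # placeholder batch
labels = np.random.randint(0, 10, size=8).astype(np.int32)         # sparse ground-truth labels
adv_images = attack.batch_generate(images, labels, batch_size=8)
print(adv_images.shape)                                             # same shape as images
```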
+import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.deep_fool import DeepFool +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'DeepFool_Test' + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_deepfool_attack(): + """ + DeepFool-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = Model(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), + axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + # attacking + classes = 10 + attack = DeepFool(net, classes, norm_level=2, + bounds=(0.0, 1.0)) + start_time = time.clock() + adv_data = attack.batch_generate(np.concatenate(test_images), + np.concatenate(test_labels), batch_size=32) + stop_time = time.clock() + pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy() + # rescale predict confidences into (0, 1). 
+ pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_labels_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", + accuracy_adv) + test_labels = np.eye(10)[np.concatenate(test_labels)] + attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), + test_labels, adv_data.transpose(0, 2, 3, 1), + pred_logits_adv) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_deepfool_attack() diff --git a/example/mnist_demo/mnist_attack_fgsm.py b/example/mnist_demo/mnist_attack_fgsm.py new file mode 100644 index 0000000..f951656 --- /dev/null +++ b/example/mnist_demo/mnist_attack_fgsm.py @@ -0,0 +1,119 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
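The DeepFool demo above follows the same pattern: one attack object, one `batch_generate` call on sparse labels. A minimal sketch with placeholder data and an untrained network (both assumptions):

```python
# Sketch: minimal DeepFool invocation on placeholder data.
import numpy as np
from mindspore import context
from mindarmour.attacks.deep_fool import DeepFool

from lenet5_net import LeNet5

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")  # as in the demos

net = LeNet5()                                    # load trained parameters in practice
attack = DeepFool(net, 10, norm_level=2, bounds=(0.0, 1.0))

images = np.random.rand(8, 1, 32, 32).astype(np.float32)
labels = np.random.randint(0, 10, size=8).astype(np.int32)   # sparse labels, as in the demo
adv_images = attack.batch_generate(images, labels, batch_size=8)
print(np.max(np.abs(adv_images - images)))        # size of the added perturbation
```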
+import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.gradient_method import FastGradientSignMethod + +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'FGSM_Test' + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_fast_gradient_sign_method(): + """ + FGSM-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size, sparse=False) + + # prediction accuracy before attack + model = Model(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), + axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.argmax(np.concatenate(test_labels), axis=1) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + # attacking + attack = FastGradientSignMethod(net, eps=0.3) + start_time = time.clock() + adv_data = attack.batch_generate(np.concatenate(test_images), + np.concatenate(test_labels), batch_size=32) + stop_time = time.clock() + np.save('./adv_data', adv_data) + pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy() + # rescale predict confidences into (0, 1). 
+ pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_labels_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv) + attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), + np.concatenate(test_labels), + adv_data.transpose(0, 2, 3, 1), + pred_logits_adv) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_fast_gradient_sign_method() diff --git a/example/mnist_demo/mnist_attack_genetic.py b/example/mnist_demo/mnist_attack_genetic.py new file mode 100644 index 0000000..6c4a6f6 --- /dev/null +++ b/example/mnist_demo/mnist_attack_genetic.py @@ -0,0 +1,138 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
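The FGSM demo above differs from the CW and DeepFool ones only in that its dataset is built with `sparse=False`, so the labels passed to `batch_generate` are one-hot. A minimal sketch of the attack call under that convention (placeholder data and untrained network are assumptions):

```python
# Sketch: minimal FastGradientSignMethod invocation with one-hot labels.
import numpy as np
from mindspore import context
from mindarmour.attacks.gradient_method import FastGradientSignMethod

from lenet5_net import LeNet5

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")  # as in the demos

net = LeNet5()                                    # load trained parameters in practice
attack = FastGradientSignMethod(net, eps=0.3)

images = np.random.rand(8, 1, 32, 32).astype(np.float32)
labels = np.eye(10)[np.random.randint(0, 10, size=8)].astype(np.float32)   # one-hot labels
adv_images = attack.batch_generate(images, labels, batch_size=8)
print(np.max(np.abs(adv_images - images)))        # perturbation is bounded by eps=0.3
```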
+import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.genetic_attack import GeneticAttack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'Genetic_Attack' + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_genetic_attack_on_mnist(): + """ + Genetic-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = ModelToBeAttacked(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(images), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy) + + # attacking + attack = GeneticAttack(model=model, pop_size=6, mutation_rate=0.05, + per_bounds=0.1, step_size=0.25, temp=0.1, + sparse=True) + targeted_labels = np.random.randint(0, 10, size=len(true_labels)) + for i in range(len(true_labels)): + if targeted_labels[i] == true_labels[i]: + targeted_labels[i] = (targeted_labels[i] + 1) % 10 + start_time = time.clock() + success_list, adv_data, query_list = attack.generate( + np.concatenate(test_images), targeted_labels) + stop_time = time.clock() + LOGGER.info(TAG, 'success_list: %s', success_list) + LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list)) + pred_logits_adv = model.predict(adv_data) + # rescale predict confidences into (0, 1). 
+ pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_lables_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_lables_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %g", + accuracy_adv) + test_labels_onehot = np.eye(10)[true_labels] + attack_evaluate = AttackEvaluate(np.concatenate(test_images), + test_labels_onehot, adv_data, + pred_logits_adv, targeted=True, + target_label=targeted_labels) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_genetic_attack_on_mnist() diff --git a/example/mnist_demo/mnist_attack_hsja.py b/example/mnist_demo/mnist_attack_hsja.py new file mode 100644 index 0000000..11d0c18 --- /dev/null +++ b/example/mnist_demo/mnist_attack_hsja.py @@ -0,0 +1,150 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
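Black-box attacks such as the genetic attack above never touch gradients; they only need a `BlackModel` subclass whose `predict` returns numpy logits. A condensed sketch of that wrapper plus the attack call, with placeholder data and target labels as assumptions:

```python
# Sketch: wrap a network as a BlackModel and run GeneticAttack on placeholder data.
import numpy as np
from mindspore import Tensor, context
from mindarmour.attacks.black.black_model import BlackModel
from mindarmour.attacks.black.genetic_attack import GeneticAttack

from lenet5_net import LeNet5

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")  # as in the demos


class ModelToBeAttacked(BlackModel):
    """Query-only wrapper: the attack sees predictions, never gradients."""
    def __init__(self, network):
        super(ModelToBeAttacked, self).__init__()
        self._network = network

    def predict(self, inputs):
        return self._network(Tensor(inputs.astype(np.float32))).asnumpy()


model = ModelToBeAttacked(LeNet5())               # load trained parameters in practice
attack = GeneticAttack(model=model, pop_size=6, mutation_rate=0.05,
                       per_bounds=0.1, step_size=0.25, temp=0.1, sparse=True)

images = np.random.rand(4, 1, 32, 32).astype(np.float32)
target_labels = np.random.randint(0, 10, size=4)   # classes the attack should reach
success, adv_images, queries = attack.generate(images, target_labels)
print(success, np.mean(queries))                   # per-sample success flags, average queries
```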
+import sys +import numpy as np +import pytest + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.hop_skip_jump_attack import HopSkipJumpAttack +from mindarmour.attacks.black.black_model import BlackModel + +from mindarmour.utils.logger import LogUtil +from lenet5_net import LeNet5 + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + +LOGGER = LogUtil.get_instance() +TAG = 'HopSkipJumpAttack' + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + if len(inputs.shape) == 3: + inputs = inputs[np.newaxis, :] + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +def random_target_labels(true_labels): + target_labels = [] + for label in true_labels: + while True: + target_label = np.random.randint(0, 10) + if target_label != label: + target_labels.append(target_label) + break + return target_labels + + +def create_target_images(dataset, data_labels, target_labels): + res = [] + for label in target_labels: + for i in range(len(data_labels)): + if data_labels[i] == label: + res.append(dataset[i]) + break + return np.array(res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_hsja_mnist_attack(): + """ + hsja-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + net.set_train(False) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = ModelToBeAttacked(net) + batch_num = 5 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(images), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", + accuracy) + test_images = np.concatenate(test_images) + + # attacking + norm = 'l2' + search = 'grid_search' + target = False + attack = HopSkipJumpAttack(model, constraint=norm, stepsize_search=search) + if target: + target_labels = random_target_labels(true_labels) + target_images = create_target_images(test_images, predict_labels, + target_labels) + attack.set_target_images(target_images) + success_list, adv_data, query_list = attack.generate(test_images, target_labels) + else: + success_list, adv_data, query_list = attack.generate(test_images, None) + + adv_datas = [] + gts = [] + for success, adv, gt in zip(success_list, adv_data, true_labels): + if success: + adv_datas.append(adv) + gts.append(gt) + if len(gts) > 0: + adv_datas = 
np.concatenate(np.asarray(adv_datas), axis=0) + gts = np.asarray(gts) + pred_logits_adv = model.predict(adv_datas) + pred_lables_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_lables_adv, gts)) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + accuracy_adv) + + +if __name__ == '__main__': + test_hsja_mnist_attack() diff --git a/example/mnist_demo/mnist_attack_jsma.py b/example/mnist_demo/mnist_attack_jsma.py new file mode 100644 index 0000000..de8b24f --- /dev/null +++ b/example/mnist_demo/mnist_attack_jsma.py @@ -0,0 +1,124 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.jsma import JSMAAttack +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'JSMA_Test' + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_jsma_attack(): + """ + JSMA-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = Model(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), + axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + targeted_labels = np.random.randint(0, 10, size=len(true_labels)) + for i in range(len(true_labels)): + if targeted_labels[i] == true_labels[i]: + targeted_labels[i] = (targeted_labels[i] + 1) % 10 + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy) + + # attacking + classes = 10 + attack = JSMAAttack(net, classes) + start_time = time.clock() + adv_data = attack.batch_generate(np.concatenate(test_images), + targeted_labels, batch_size=32) + stop_time = 
time.clock()
+    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
+    # rescale predict confidences into (0, 1).
+    pred_logits_adv = softmax(pred_logits_adv, axis=1)
+    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
+    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
+    LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
+                accuracy_adv)
+    test_labels = np.eye(10)[np.concatenate(test_labels)]
+    attack_evaluate = AttackEvaluate(
+        np.concatenate(test_images).transpose(0, 2, 3, 1),
+        test_labels, adv_data.transpose(0, 2, 3, 1),
+        pred_logits_adv, targeted=True, target_label=targeted_labels)
+    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
+                attack_evaluate.mis_classification_rate())
+    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
+                attack_evaluate.avg_conf_adv_class())
+    LOGGER.info(TAG, 'The average confidence of true class is : %s',
+                attack_evaluate.avg_conf_true_class())
+    LOGGER.info(TAG, 'The average distances (l0, l2, linf) between original '
+                'samples and adversarial samples are: %s',
+                attack_evaluate.avg_lp_distance())
+    LOGGER.info(TAG, 'The average structural similarity between original '
+                'samples and adversarial samples is: %s',
+                attack_evaluate.avg_ssim())
+    LOGGER.info(TAG, 'The average time cost per adversarial sample is %s',
+                (stop_time - start_time) / (batch_num*batch_size))
+
+
+if __name__ == '__main__':
+    test_jsma_attack()
diff --git a/example/mnist_demo/mnist_attack_lbfgs.py b/example/mnist_demo/mnist_attack_lbfgs.py
new file mode 100644
index 0000000..425b105
--- /dev/null
+++ b/example/mnist_demo/mnist_attack_lbfgs.py
@@ -0,0 +1,132 @@
+# Copyright 2019 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
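Each attack demo in this patch closes with the same AttackEvaluate bookkeeping as the JSMA script above. The sketch below isolates that evaluation step so the metric calls can be read in one place; the array shapes (NHWC images, one-hot labels, softmax scores) mirror the demos, but the random arrays are only stand-ins for the MNIST batches and attack outputs, so the printed numbers are meaningless.

import numpy as np

from mindarmour.evaluations.attack_evaluation import AttackEvaluate

# Stand-in arrays shaped like the demo data after the transpose to NHWC:
# 32 samples, 32x32 single-channel images, 10 classes.
num, classes = 32, 10
ori_images = np.random.rand(num, 32, 32, 1).astype(np.float32)
adv_images = np.clip(ori_images + 0.3*np.sign(np.random.randn(num, 32, 32, 1)),
                     0.0, 1.0).astype(np.float32)
onehot_labels = np.eye(classes)[np.random.randint(0, classes, num)].astype(np.float32)
adv_probs = np.random.dirichlet(np.ones(classes), size=num).astype(np.float32)

evaluator = AttackEvaluate(ori_images, onehot_labels, adv_images, adv_probs)
print(evaluator.mis_classification_rate())  # fraction of adversarial samples misclassified
print(evaluator.avg_conf_adv_class())       # mean confidence assigned to the adversarial class
print(evaluator.avg_conf_true_class())      # mean confidence left on the true class
print(evaluator.avg_lp_distance())          # mean (l0, l2, linf) perturbation sizes
print(evaluator.avg_ssim())                 # mean structural similarity to the originals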
+import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.lbfgs import LBFGS +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'LBFGS_Test' + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_lbfgs_attack(): + """ + LBFGS-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size, sparse=False) + + # prediction accuracy before attack + model = Model(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), + axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.argmax(np.concatenate(test_labels), axis=1) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + # attacking + is_targeted = True + if is_targeted: + targeted_labels = np.random.randint(0, 10, size=len(true_labels)).astype(np.int32) + for i in range(len(true_labels)): + if targeted_labels[i] == true_labels[i]: + targeted_labels[i] = (targeted_labels[i] + 1) % 10 + else: + targeted_labels = true_labels.astype(np.int32) + targeted_labels = np.eye(10)[targeted_labels].astype(np.float32) + attack = LBFGS(net, is_targeted=is_targeted) + start_time = time.clock() + adv_data = attack.batch_generate(np.concatenate(test_images), + targeted_labels, + batch_size=batch_size) + stop_time = time.clock() + pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy() + # rescale predict confidences into (0, 1). 
+ pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_labels_adv = np.argmax(pred_logits_adv, axis=1) + + accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", + accuracy_adv) + attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), + np.concatenate(test_labels), + adv_data.transpose(0, 2, 3, 1), + pred_logits_adv, + targeted=is_targeted, + target_label=np.argmax(targeted_labels, + axis=1)) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_lbfgs_attack() diff --git a/example/mnist_demo/mnist_attack_nes.py b/example/mnist_demo/mnist_attack_nes.py new file mode 100644 index 0000000..35e322c --- /dev/null +++ b/example/mnist_demo/mnist_attack_nes.py @@ -0,0 +1,168 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import numpy as np +import pytest + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.natural_evolutionary_strategy import NES +from mindarmour.attacks.black.black_model import BlackModel + +from mindarmour.utils.logger import LogUtil +from lenet5_net import LeNet5 + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + +LOGGER = LogUtil.get_instance() +TAG = 'HopSkipJumpAttack' + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + if len(inputs.shape) == 3: + inputs = inputs[np.newaxis, :] + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +def random_target_labels(true_labels, labels_list): + target_labels = [] + for label in true_labels: + while True: + target_label = np.random.choice(labels_list) + if target_label != label: + target_labels.append(target_label) + break + return target_labels + + +def _pseudorandom_target(index, total_indices, true_class): + """ pseudo random_target """ + rng = np.random.RandomState(index) + target = true_class + while target == true_class: + target = rng.randint(0, total_indices) + return target + + +def create_target_images(dataset, data_labels, target_labels): + res = [] + for label in target_labels: + for i in range(len(data_labels)): + if data_labels[i] == label: + res.append(dataset[i]) + break + return np.array(res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_nes_mnist_attack(): + """ + hsja-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + net.set_train(False) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = ModelToBeAttacked(net) + # the number of batches of attacking samples + batch_num = 5 + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(images), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", + accuracy) + test_images = np.concatenate(test_images) + + # attacking + scene = 'Query_Limit' + if scene == 'Query_Limit': + top_k = -1 + elif scene == 'Partial_Info': + top_k = 5 + elif scene == 'Label_Only': + top_k = 5 + + success = 0 + queries_num = 0 + + nes_instance = NES(model, scene, top_k=top_k) + test_length = 32 + advs = [] + for img_index in range(test_length): + # Initial image and class selection + initial_img = test_images[img_index] + orig_class = true_labels[img_index] + initial_img = 
[initial_img] + target_class = random_target_labels([orig_class], true_labels) + target_image = create_target_images(test_images, true_labels, + target_class) + nes_instance.set_target_images(target_image) + tag, adv, queries = nes_instance.generate(initial_img, target_class) + if tag[0]: + success += 1 + queries_num += queries[0] + advs.append(adv) + + advs = np.reshape(advs, (len(advs), 1, 32, 32)) + adv_pred = np.argmax(model.predict(advs), axis=1) + adv_accuracy = np.mean(np.equal(adv_pred, true_labels[:test_length])) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", + adv_accuracy) + + +if __name__ == '__main__': + test_nes_mnist_attack() diff --git a/example/mnist_demo/mnist_attack_pgd.py b/example/mnist_demo/mnist_attack_pgd.py new file mode 100644 index 0000000..e084aca --- /dev/null +++ b/example/mnist_demo/mnist_attack_pgd.py @@ -0,0 +1,119 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.iterative_gradient_method import ProjectedGradientDescent + +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'PGD_Test' + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_projected_gradient_descent_method(): + """ + PGD-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size, sparse=False) + + # prediction accuracy before attack + model = Model(net) + batch_num = 32 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), + axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.argmax(np.concatenate(test_labels), axis=1) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + # attacking + attack = 
ProjectedGradientDescent(net, eps=0.3) + start_time = time.clock() + adv_data = attack.batch_generate(np.concatenate(test_images), + np.concatenate(test_labels), batch_size=32) + stop_time = time.clock() + np.save('./adv_data', adv_data) + pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy() + # rescale predict confidences into (0, 1). + pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_labels_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv) + attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), + np.concatenate(test_labels), + adv_data.transpose(0, 2, 3, 1), + pred_logits_adv) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_projected_gradient_descent_method() diff --git a/example/mnist_demo/mnist_attack_pointwise.py b/example/mnist_demo/mnist_attack_pointwise.py new file mode 100644 index 0000000..5ac33e0 --- /dev/null +++ b/example/mnist_demo/mnist_attack_pointwise.py @@ -0,0 +1,138 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
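The PGD script above and the other white-box demos differ mainly in which attack object they construct; all of them feed plain NumPy batches to the shared batch_generate entry point. The sketch below condenses that pattern. It assumes the demo's lenet5_net module is importable and uses a randomly initialized LeNet5 plus random one-hot labels purely as placeholders for the trained checkpoint and the MNIST data.

import numpy as np

from mindarmour.attacks import FastGradientSignMethod
from mindarmour.attacks.iterative_gradient_method import ProjectedGradientDescent
from lenet5_net import LeNet5

net = LeNet5()   # placeholder: the demos load the trained checkpoint instead
images = np.random.rand(64, 1, 32, 32).astype(np.float32)
onehot_labels = np.eye(10)[np.random.randint(0, 10, 64)].astype(np.float32)

# Any of the white-box attacks can be swapped in behind the same call.
for attack in (FastGradientSignMethod(net, eps=0.3),
               ProjectedGradientDescent(net, eps=0.3)):
    adv_batch = attack.batch_generate(images, onehot_labels, batch_size=32)
    print(type(attack).__name__, adv_batch.shape)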
+import sys
+import numpy as np
+import pytest
+from scipy.special import softmax
+
+from mindspore import Tensor
+from mindspore import context
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+from mindarmour.attacks.black.pointwise_attack import PointWiseAttack
+from mindarmour.attacks.black.black_model import BlackModel
+from mindarmour.utils.logger import LogUtil
+from mindarmour.evaluations.attack_evaluation import AttackEvaluate
+
+from lenet5_net import LeNet5
+
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+sys.path.append("..")
+from data_processing import generate_mnist_dataset
+
+LOGGER = LogUtil.get_instance()
+TAG = 'Pointwise_Attack'
+LOGGER.set_level('INFO')
+
+
+class ModelToBeAttacked(BlackModel):
+    """model to be attacked"""
+
+    def __init__(self, network):
+        super(ModelToBeAttacked, self).__init__()
+        self._network = network
+
+    def predict(self, inputs):
+        """predict"""
+        if len(inputs.shape) == 3:
+            inputs = inputs[np.newaxis, :]
+        result = self._network(Tensor(inputs.astype(np.float32)))
+        return result.asnumpy()
+
+
+@pytest.mark.level1
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_card
+@pytest.mark.component_mindarmour
+def test_pointwise_attack_on_mnist():
+    """
+    Point-Wise-Attack test
+    """
+    # load trained network
+    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
+    net = LeNet5()
+    load_dict = load_checkpoint(ckpt_name)
+    load_param_into_net(net, load_dict)
+
+    # get test data
+    data_list = "./MNIST_unzip/test"
+    batch_size = 32
+    ds = generate_mnist_dataset(data_list, batch_size=batch_size)
+
+    # prediction accuracy before attack
+    model = ModelToBeAttacked(net)
+    batch_num = 3  # the number of batches of attacking samples
+    test_images = []
+    test_labels = []
+    predict_labels = []
+    i = 0
+    for data in ds.create_tuple_iterator():
+        i += 1
+        images = data[0].astype(np.float32)
+        labels = data[1]
+        test_images.append(images)
+        test_labels.append(labels)
+        pred_labels = np.argmax(model.predict(images), axis=1)
+        predict_labels.append(pred_labels)
+        if i >= batch_num:
+            break
+    predict_labels = np.concatenate(predict_labels)
+    true_labels = np.concatenate(test_labels)
+    accuracy = np.mean(np.equal(predict_labels, true_labels))
+    LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)
+
+    # attacking
+    is_target = False
+    attack = PointWiseAttack(model=model, is_targeted=is_target)
+    if is_target:
+        targeted_labels = np.random.randint(0, 10, size=len(true_labels))
+        for i in range(len(true_labels)):
+            if targeted_labels[i] == true_labels[i]:
+                targeted_labels[i] = (targeted_labels[i] + 1) % 10
+    else:
+        targeted_labels = true_labels
+    success_list, adv_data, query_list = attack.generate(
+        np.concatenate(test_images), targeted_labels)
+    success_list = np.arange(success_list.shape[0])[success_list]
+    LOGGER.info(TAG, 'success_list: %s', success_list)
+    LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
+    adv_preds = []
+    for ite_data in adv_data:
+        pred_logits_adv = model.predict(ite_data)
+        # rescale predict confidences into (0, 1).
+        pred_logits_adv = softmax(pred_logits_adv, axis=1)
+        adv_preds.extend(pred_logits_adv)
+    accuracy_adv = np.mean(np.equal(np.argmax(adv_preds, axis=1), true_labels))
+    LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
+                accuracy_adv)
+    test_labels_onehot = np.eye(10)[true_labels]
+    attack_evaluate = AttackEvaluate(np.concatenate(test_images),
+                                     test_labels_onehot, adv_data,
+                                     adv_preds, targeted=is_target,
+                                     target_label=targeted_labels)
+    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
+                attack_evaluate.mis_classification_rate())
+    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
+                attack_evaluate.avg_conf_adv_class())
+    LOGGER.info(TAG, 'The average confidence of true class is : %s',
+                attack_evaluate.avg_conf_true_class())
+    LOGGER.info(TAG, 'The average distances (l0, l2, linf) between original '
+                'samples and adversarial samples are: %s',
+                attack_evaluate.avg_lp_distance())
+
+
+if __name__ == '__main__':
+    test_pointwise_attack_on_mnist()
diff --git a/example/mnist_demo/mnist_attack_pso.py b/example/mnist_demo/mnist_attack_pso.py
new file mode 100644
index 0000000..19c4213
--- /dev/null
+++ b/example/mnist_demo/mnist_attack_pso.py
@@ -0,0 +1,131 @@
+# Copyright 2019 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys +import time +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.pso_attack import PSOAttack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'PSO_Attack' + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pso_attack_on_mnist(): + """ + PSO-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = ModelToBeAttacked(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(images), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + # attacking + attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=True) + start_time = time.clock() + success_list, adv_data, query_list = attack.generate( + np.concatenate(test_images), np.concatenate(test_labels)) + stop_time = time.clock() + LOGGER.info(TAG, 'success_list: %s', success_list) + LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list)) + pred_logits_adv = model.predict(adv_data) + # rescale predict confidences into (0, 1). 
+ pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_labels_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %s", + accuracy_adv) + test_labels_onehot = np.eye(10)[np.concatenate(test_labels)] + attack_evaluate = AttackEvaluate(np.concatenate(test_images), + test_labels_onehot, adv_data, + pred_logits_adv) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + attack_evaluate.mis_classification_rate()) + LOGGER.info(TAG, 'The average confidence of adversarial class is : %s', + attack_evaluate.avg_conf_adv_class()) + LOGGER.info(TAG, 'The average confidence of true class is : %s', + attack_evaluate.avg_conf_true_class()) + LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_lp_distance()) + LOGGER.info(TAG, 'The average structural similarity between original ' + 'samples and adversarial samples are: %s', + attack_evaluate.avg_ssim()) + LOGGER.info(TAG, 'The average costing time is %s', + (stop_time - start_time)/(batch_num*batch_size)) + + +if __name__ == '__main__': + test_pso_attack_on_mnist() diff --git a/example/mnist_demo/mnist_attack_salt_and_pepper.py b/example/mnist_demo/mnist_attack_salt_and_pepper.py new file mode 100644 index 0000000..441ebe4 --- /dev/null +++ b/example/mnist_demo/mnist_attack_salt_and_pepper.py @@ -0,0 +1,142 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
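The PSO demo above and the salt-and-pepper demo that follows share one contract: the network under attack is hidden behind a BlackModel subclass whose predict() returns an (m, n) score array, and the attack's generate() hands back (success_list, adv_data, query_list). The sketch below compresses that contract; QueryModel is an illustrative name rather than anything defined in this patch, and the randomly initialized LeNet5 and random batch stand in for the trained checkpoint and MNIST data loaded in the demos.

import numpy as np
from mindspore import Tensor

from mindarmour.attacks.black.black_model import BlackModel
from mindarmour.attacks.black.pso_attack import PSOAttack
from lenet5_net import LeNet5


class QueryModel(BlackModel):
    """Query-only wrapper around the network under attack."""

    def __init__(self, network):
        super(QueryModel, self).__init__()
        self._network = network

    def predict(self, inputs):
        # Must return an (m, n) array of per-class scores.
        return self._network(Tensor(inputs.astype(np.float32))).asnumpy()


net = LeNet5()   # placeholder: the demos load the trained checkpoint instead
model = QueryModel(net)
images = np.random.rand(4, 1, 32, 32).astype(np.float32)   # stand-in MNIST batch
labels = np.random.randint(0, 10, 4)                        # sparse labels, as in the PSO demo

attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=True)
success_list, adv_data, query_list = attack.generate(images, labels)
print(success_list, np.mean(query_list))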
+import sys +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.salt_and_pepper_attack import SaltAndPepperNoiseAttack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils.logger import LogUtil +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'Salt_and_Pepper_Attack' +LOGGER.set_level('DEBUG') + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + if len(inputs.shape) == 3: + inputs = inputs[np.newaxis, :] + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_salt_and_pepper_attack_on_mnist(): + """ + Salt-and-Pepper-Attack test + """ + # upload trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + + # prediction accuracy before attack + model = ModelToBeAttacked(net) + batch_num = 3 # the number of batches of attacking samples + test_images = [] + test_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + test_images.append(images) + test_labels.append(labels) + pred_labels = np.argmax(model.predict(images), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + LOGGER.debug(TAG, 'model input image shape is: {}'.format(np.array(test_images).shape)) + predict_labels = np.concatenate(predict_labels) + true_labels = np.concatenate(test_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy) + + # attacking + is_target = False + attack = SaltAndPepperNoiseAttack(model=model, + is_targeted=is_target, + sparse=True) + if is_target: + targeted_labels = np.random.randint(0, 10, size=len(true_labels)) + for i in range(len(true_labels)): + if targeted_labels[i] == true_labels[i]: + targeted_labels[i] = (targeted_labels[i] + 1) % 10 + else: + targeted_labels = true_labels + LOGGER.debug(TAG, 'input shape is: {}'.format(np.concatenate(test_images).shape)) + success_list, adv_data, query_list = attack.generate( + np.concatenate(test_images), targeted_labels) + success_list = np.arange(success_list.shape[0])[success_list] + LOGGER.info(TAG, 'success_list: %s', success_list) + LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list)) + adv_preds = [] + for ite_data in adv_data: + pred_logits_adv = model.predict(ite_data) + # rescale predict confidences into (0, 1). 
+        pred_logits_adv = softmax(pred_logits_adv, axis=1)
+        adv_preds.extend(pred_logits_adv)
+    accuracy_adv = np.mean(np.equal(np.argmax(adv_preds, axis=1), true_labels))
+    LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
+                accuracy_adv)
+    test_labels_onehot = np.eye(10)[true_labels]
+    attack_evaluate = AttackEvaluate(np.concatenate(test_images),
+                                     test_labels_onehot, adv_data,
+                                     adv_preds, targeted=is_target,
+                                     target_label=targeted_labels)
+    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
+                attack_evaluate.mis_classification_rate())
+    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
+                attack_evaluate.avg_conf_adv_class())
+    LOGGER.info(TAG, 'The average confidence of true class is : %s',
+                attack_evaluate.avg_conf_true_class())
+    LOGGER.info(TAG, 'The average distances (l0, l2, linf) between original '
+                'samples and adversarial samples are: %s',
+                attack_evaluate.avg_lp_distance())
+
+
+if __name__ == '__main__':
+    test_salt_and_pepper_attack_on_mnist()
diff --git a/example/mnist_demo/mnist_defense_nad.py b/example/mnist_demo/mnist_defense_nad.py
new file mode 100644
index 0000000..e9e04d3
--- /dev/null
+++ b/example/mnist_demo/mnist_defense_nad.py
@@ -0,0 +1,144 @@
+# Copyright 2019 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""defense example using nad"""
+import sys
+
+import logging
+
+import numpy as np
+import pytest
+
+from mindspore import Tensor
+from mindspore import context
+from mindspore import nn
+from mindspore.nn import SoftmaxCrossEntropyWithLogits
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+from mindarmour.attacks import FastGradientSignMethod
+from mindarmour.defenses import NaturalAdversarialDefense
+from mindarmour.utils.logger import LogUtil
+
+from lenet5_net import LeNet5
+
+sys.path.append("..")
+from data_processing import generate_mnist_dataset
+
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+LOGGER = LogUtil.get_instance()
+TAG = 'Nad_Example'
+
+
+@pytest.mark.level1
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_card
+@pytest.mark.component_mindarmour
+def test_nad_method():
+    """
+    NAD-Defense test.
+    """
+    # 1. load trained network
+    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
+    net = LeNet5()
+    load_dict = load_checkpoint(ckpt_name)
+    load_param_into_net(net, load_dict)
+
+    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
+
+    nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,
+                                    bounds=(0.0, 1.0), eps=0.3)
+
+    # 2. 
get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds_test = generate_mnist_dataset(data_list, batch_size=batch_size, + sparse=False) + inputs = [] + labels = [] + for data in ds_test.create_tuple_iterator(): + inputs.append(data[0].astype(np.float32)) + labels.append(data[1]) + inputs = np.concatenate(inputs) + labels = np.concatenate(labels) + + # 3. get accuracy of test data on original model + net.set_train(False) + acc_list = [] + batchs = inputs.shape[0] // batch_size + for i in range(batchs): + batch_inputs = inputs[i*batch_size : (i + 1)*batch_size] + batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + logits = net(Tensor(batch_inputs)).asnumpy() + label_pred = np.argmax(logits, axis=1) + acc_list.append(np.mean(batch_labels == label_pred)) + + LOGGER.debug(TAG, 'accuracy of TEST data on original model is : %s', + np.mean(acc_list)) + + # 4. get adv of test data + attack = FastGradientSignMethod(net, eps=0.3) + adv_data = attack.batch_generate(inputs, labels) + LOGGER.debug(TAG, 'adv_data.shape is : %s', adv_data.shape) + + # 5. get accuracy of adv data on original model + net.set_train(False) + acc_list = [] + batchs = adv_data.shape[0] // batch_size + for i in range(batchs): + batch_inputs = adv_data[i*batch_size : (i + 1)*batch_size] + batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + logits = net(Tensor(batch_inputs)).asnumpy() + label_pred = np.argmax(logits, axis=1) + acc_list.append(np.mean(batch_labels == label_pred)) + + LOGGER.debug(TAG, 'accuracy of adv data on original model is : %s', + np.mean(acc_list)) + + # 6. defense + net.set_train() + nad.batch_defense(inputs, labels, batch_size=32, epochs=10) + + # 7. get accuracy of test data on defensed model + net.set_train(False) + acc_list = [] + batchs = inputs.shape[0] // batch_size + for i in range(batchs): + batch_inputs = inputs[i*batch_size : (i + 1)*batch_size] + batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + logits = net(Tensor(batch_inputs)).asnumpy() + label_pred = np.argmax(logits, axis=1) + acc_list.append(np.mean(batch_labels == label_pred)) + + LOGGER.debug(TAG, 'accuracy of TEST data on defensed model is : %s', + np.mean(acc_list)) + + # 8. get accuracy of adv data on defensed model + acc_list = [] + batchs = adv_data.shape[0] // batch_size + for i in range(batchs): + batch_inputs = adv_data[i*batch_size : (i + 1)*batch_size] + batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + logits = net(Tensor(batch_inputs)).asnumpy() + label_pred = np.argmax(logits, axis=1) + acc_list.append(np.mean(batch_labels == label_pred)) + + LOGGER.debug(TAG, 'accuracy of adv data on defensed model is : %s', + np.mean(acc_list)) + + +if __name__ == '__main__': + LOGGER.set_level(logging.DEBUG) + test_nad_method() diff --git a/example/mnist_demo/mnist_evaluation.py b/example/mnist_demo/mnist_evaluation.py new file mode 100644 index 0000000..35871f6 --- /dev/null +++ b/example/mnist_demo/mnist_evaluation.py @@ -0,0 +1,326 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""evaluate example""" +import sys +import os +import time +import numpy as np +from scipy.special import softmax + +from lenet5_net import LeNet5 +from mindspore import Model +from mindspore import Tensor +from mindspore import context +from mindspore import nn +from mindspore.nn import Cell +from mindspore.ops.operations import TensorAdd +from mindspore.nn import SoftmaxCrossEntropyWithLogits +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks import FastGradientSignMethod +from mindarmour.attacks import GeneticAttack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.defenses import NaturalAdversarialDefense +from mindarmour.evaluations import BlackDefenseEvaluate +from mindarmour.evaluations import DefenseEvaluate +from mindarmour.utils.logger import LogUtil +from mindarmour.detectors.black.similarity_detector import SimilarityDetector + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +LOGGER = LogUtil.get_instance() +TAG = 'Defense_Evaluate_Example' + + +def get_detector(train_images): + encoder = Model(EncoderNet(encode_dim=256)) + detector = SimilarityDetector(max_k_neighbor=50, trans_model=encoder) + detector.fit(inputs=train_images) + return detector + + +class EncoderNet(Cell): + """ + Similarity encoder for input data + """ + + def __init__(self, encode_dim): + super(EncoderNet, self).__init__() + self._encode_dim = encode_dim + self.add = TensorAdd() + + def construct(self, inputs): + """ + construct the neural network + Args: + inputs (Tensor): input data to neural network. + Returns: + Tensor, output of neural network. + """ + return self.add(inputs, inputs) + + def get_encode_dim(self): + """ + Get the dimension of encoded inputs + + Returns: + int, dimension of encoded inputs. 
+ """ + return self._encode_dim + + +class ModelToBeAttacked(BlackModel): + """ + model to be attack + """ + + def __init__(self, network, defense=False, train_images=None): + super(ModelToBeAttacked, self).__init__() + self._network = network + self._queries = [] + self._defense = defense + self._detector = None + self._detected_res = [] + if self._defense: + self._detector = get_detector(train_images) + + def predict(self, inputs): + """ + predict function + """ + query_num = inputs.shape[0] + results = [] + if self._detector: + for i in range(query_num): + query = np.expand_dims(inputs[i].astype(np.float32), axis=0) + result = self._network(Tensor(query)).asnumpy() + det_num = len(self._detector.get_detected_queries()) + self._detector.detect([query]) + new_det_num = len(self._detector.get_detected_queries()) + # If attack query detected, return random predict result + if new_det_num > det_num: + results.append(result + np.random.rand(*result.shape)) + self._detected_res.append(True) + else: + results.append(result) + self._detected_res.append(False) + results = np.concatenate(results) + else: + results = self._network(Tensor(inputs.astype(np.float32))).asnumpy() + return results + + def get_detected_result(self): + return self._detected_res + + +def test_black_defense(): + # load trained network + current_dir = os.path.dirname(os.path.abspath(__file__)) + ckpt_name = os.path.abspath(os.path.join( + current_dir, './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')) + # ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + wb_net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(wb_net, load_dict) + + # get test data + data_list = "./MNIST_unzip/test" + batch_size = 32 + ds_test = generate_mnist_dataset(data_list, batch_size=batch_size, + sparse=False) + inputs = [] + labels = [] + for data in ds_test.create_tuple_iterator(): + inputs.append(data[0].astype(np.float32)) + labels.append(data[1]) + inputs = np.concatenate(inputs).astype(np.float32) + labels = np.concatenate(labels).astype(np.float32) + labels_sparse = np.argmax(labels, axis=1) + + target_label = np.random.randint(0, 10, size=labels_sparse.shape[0]) + for idx in range(labels_sparse.shape[0]): + while target_label[idx] == labels_sparse[idx]: + target_label[idx] = np.random.randint(0, 10) + target_label = np.eye(10)[target_label].astype(np.float32) + + attacked_size = 50 + benign_size = 500 + + attacked_sample = inputs[:attacked_size] + attacked_true_label = labels[:attacked_size] + benign_sample = inputs[attacked_size:attacked_size + benign_size] + + wb_model = ModelToBeAttacked(wb_net) + + # gen white-box adversarial examples of test data + wb_attack = FastGradientSignMethod(wb_net, eps=0.3) + wb_adv_sample = wb_attack.generate(attacked_sample, + attacked_true_label) + + wb_raw_preds = softmax(wb_model.predict(wb_adv_sample), axis=1) + accuracy_test = np.mean( + np.equal(np.argmax(wb_model.predict(attacked_sample), axis=1), + np.argmax(attacked_true_label, axis=1))) + LOGGER.info(TAG, "prediction accuracy before white-box attack is : %s", + accuracy_test) + accuracy_adv = np.mean(np.equal(np.argmax(wb_raw_preds, axis=1), + np.argmax(attacked_true_label, axis=1))) + LOGGER.info(TAG, "prediction accuracy after white-box attack is : %s", + accuracy_adv) + + # improve the robustness of model with white-box adversarial examples + loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False) + opt = nn.Momentum(wb_net.trainable_params(), 0.01, 0.09) + + nad = 
NaturalAdversarialDefense(wb_net, loss_fn=loss, optimizer=opt, + bounds=(0.0, 1.0), eps=0.3) + wb_net.set_train(False) + nad.batch_defense(inputs[:5000], labels[:5000], batch_size=32, epochs=10) + + wb_def_preds = wb_net(Tensor(wb_adv_sample)).asnumpy() + wb_def_preds = softmax(wb_def_preds, axis=1) + accuracy_def = np.mean(np.equal(np.argmax(wb_def_preds, axis=1), + np.argmax(attacked_true_label, axis=1))) + LOGGER.info(TAG, "prediction accuracy after defense is : %s", accuracy_def) + + # calculate defense evaluation metrics for defense against white-box attack + wb_def_evaluate = DefenseEvaluate(wb_raw_preds, wb_def_preds, + np.argmax(attacked_true_label, axis=1)) + LOGGER.info(TAG, 'defense evaluation for white-box adversarial attack') + LOGGER.info(TAG, + 'classification accuracy variance (CAV) is : {:.2f}'.format( + wb_def_evaluate.cav())) + LOGGER.info(TAG, 'classification rectify ratio (CRR) is : {:.2f}'.format( + wb_def_evaluate.crr())) + LOGGER.info(TAG, 'classification sacrifice ratio (CSR) is : {:.2f}'.format( + wb_def_evaluate.csr())) + LOGGER.info(TAG, + 'classification confidence variance (CCV) is : {:.2f}'.format( + wb_def_evaluate.ccv())) + LOGGER.info(TAG, 'classification output stability is : {:.2f}'.format( + wb_def_evaluate.cos())) + + # calculate defense evaluation metrics for defense against black-box attack + LOGGER.info(TAG, 'defense evaluation for black-box adversarial attack') + bb_raw_preds = [] + bb_def_preds = [] + raw_query_counts = [] + raw_query_time = [] + def_query_counts = [] + def_query_time = [] + def_detection_counts = [] + + # gen black-box adversarial examples of test data + bb_net = LeNet5() + load_param_into_net(bb_net, load_dict) + bb_model = ModelToBeAttacked(bb_net, defense=False) + attack_rm = GeneticAttack(model=bb_model, pop_size=6, mutation_rate=0.05, + per_bounds=0.1, step_size=0.25, temp=0.1, + sparse=False) + attack_target_label = target_label[:attacked_size] + true_label = labels_sparse[:attacked_size + benign_size] + # evaluate robustness of original model + # gen black-box adversarial examples of test data + for idx in range(attacked_size): + raw_st = time.time() + raw_sl, raw_a, raw_qc = attack_rm.generate( + np.expand_dims(attacked_sample[idx], axis=0), + np.expand_dims(attack_target_label[idx], axis=0)) + raw_t = time.time() - raw_st + bb_raw_preds.extend(softmax(bb_model.predict(raw_a), axis=1)) + raw_query_counts.extend(raw_qc) + raw_query_time.append(raw_t) + + for idx in range(benign_size): + raw_st = time.time() + bb_raw_pred = softmax( + bb_model.predict(np.expand_dims(benign_sample[idx], axis=0)), + axis=1) + raw_t = time.time() - raw_st + bb_raw_preds.extend(bb_raw_pred) + raw_query_counts.extend([0]) + raw_query_time.append(raw_t) + + accuracy_test = np.mean( + np.equal(np.argmax(bb_raw_preds[0:len(attack_target_label)], axis=1), + np.argmax(attack_target_label, axis=1))) + LOGGER.info(TAG, "attack success before adv defense is : %s", + accuracy_test) + + # improve the robustness of model with similarity-based detector + bb_def_model = ModelToBeAttacked(bb_net, defense=True, + train_images=inputs[0:6000]) + # attack defensed model + attack_dm = GeneticAttack(model=bb_def_model, pop_size=6, + mutation_rate=0.05, + per_bounds=0.1, step_size=0.25, temp=0.1, + sparse=False) + for idx in range(attacked_size): + def_st = time.time() + def_sl, def_a, def_qc = attack_dm.generate( + np.expand_dims(attacked_sample[idx], axis=0), + np.expand_dims(attack_target_label[idx], axis=0)) + def_t = time.time() - def_st + det_res = 
bb_def_model.get_detected_result() + def_detection_counts.append(np.sum(det_res[-def_qc[0]:])) + bb_def_preds.extend(softmax(bb_def_model.predict(def_a), axis=1)) + def_query_counts.extend(def_qc) + def_query_time.append(def_t) + + for idx in range(benign_size): + def_st = time.time() + bb_def_pred = softmax( + bb_def_model.predict(np.expand_dims(benign_sample[idx], axis=0)), + axis=1) + def_t = time.time() - def_st + det_res = bb_def_model.get_detected_result() + def_detection_counts.append(np.sum(det_res[-1])) + bb_def_preds.extend(bb_def_pred) + def_query_counts.extend([0]) + def_query_time.append(def_t) + + accuracy_adv = np.mean( + np.equal(np.argmax(bb_def_preds[0:len(attack_target_label)], axis=1), + np.argmax(attack_target_label, axis=1))) + LOGGER.info(TAG, "attack success rate after adv defense is : %s", + accuracy_adv) + + bb_raw_preds = np.array(bb_raw_preds).astype(np.float32) + bb_def_preds = np.array(bb_def_preds).astype(np.float32) + # check evaluate data + max_queries = 6000 + + def_evaluate = BlackDefenseEvaluate(bb_raw_preds, bb_def_preds, + np.array(raw_query_counts), + np.array(def_query_counts), + np.array(raw_query_time), + np.array(def_query_time), + np.array(def_detection_counts), + true_label, max_queries) + + LOGGER.info(TAG, 'query count variance of adversaries is : {:.2f}'.format( + def_evaluate.qcv())) + LOGGER.info(TAG, 'attack success rate variance of adversaries ' + 'is : {:.2f}'.format(def_evaluate.asv())) + LOGGER.info(TAG, 'false positive rate (FPR) of the query-based detector ' + 'is : {:.2f}'.format(def_evaluate.fpr())) + LOGGER.info(TAG, 'the benign query response time variance (QRV) ' + 'is : {:.2f}'.format(def_evaluate.qrv())) + + +if __name__ == '__main__': + test_black_defense() diff --git a/example/mnist_demo/mnist_similarity_detector.py b/example/mnist_demo/mnist_similarity_detector.py new file mode 100644 index 0000000..da438a7 --- /dev/null +++ b/example/mnist_demo/mnist_similarity_detector.py @@ -0,0 +1,182 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
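Both the evaluation script above and the detector demo that follows drive SimilarityDetector the same way: fit it on benign queries, feed it a query stream, and read back the indexes it flags. The sketch below condenses that flow; it reuses the toy encoder idea from these demos (an identity-like Cell, not a real feature extractor) and random arrays as stand-ins for the MNIST batches, so the flagged counts are illustrative only.

import numpy as np
from mindspore import Model
from mindspore.nn import Cell
from mindspore.ops.operations import TensorAdd

from mindarmour.detectors.black.similarity_detector import SimilarityDetector


class ToyEncoder(Cell):
    """Toy encoder, as in the demos: simply doubles the input."""

    def __init__(self):
        super(ToyEncoder, self).__init__()
        self.add = TensorAdd()

    def construct(self, inputs):
        return self.add(inputs, inputs)


train_images = np.random.rand(1000, 1, 32, 32).astype(np.float32)   # benign reference queries
benign_queries = np.random.rand(200, 1, 32, 32).astype(np.float32)  # stand-in for normal traffic

detector = SimilarityDetector(max_k_neighbor=50, trans_model=Model(ToyEncoder()))
detector.fit(inputs=train_images)        # calibrates the detection threshold

detector.detect(benign_queries)
print('flagged benign queries:', len(detector.get_detected_queries()))

detector.clear_buffer()                  # reset before scoring a new query stream
# detector.detect(suspicious_queries)    # e.g. queries recorded during a black-box attack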
+import sys +import numpy as np +import pytest +from scipy.special import softmax + +from mindspore import Model +from mindspore import context +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore.ops.operations import TensorAdd +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.utils.logger import LogUtil +from mindarmour.attacks.black.pso_attack import PSOAttack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.detectors.black.similarity_detector import SimilarityDetector + +from lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +sys.path.append("..") +from data_processing import generate_mnist_dataset + +LOGGER = LogUtil.get_instance() +TAG = 'Similarity Detector test' + + +class ModelToBeAttacked(BlackModel): + """ + model to be attack + """ + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + self._queries = [] + + def predict(self, inputs): + """ + predict function + """ + query_num = inputs.shape[0] + for i in range(query_num): + self._queries.append(inputs[i].astype(np.float32)) + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + def get_queries(self): + return self._queries + + +class EncoderNet(Cell): + """ + Similarity encoder for input data + """ + + def __init__(self, encode_dim): + super(EncoderNet, self).__init__() + self._encode_dim = encode_dim + self.add = TensorAdd() + + def construct(self, inputs): + """ + construct the neural network + Args: + inputs (Tensor): input data to neural network. + Returns: + Tensor, output of neural network. + """ + return self.add(inputs, inputs) + + def get_encode_dim(self): + """ + Get the dimension of encoded inputs + + Returns: + int, dimension of encoded inputs. + """ + return self._encode_dim + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_similarity_detector(): + """ + Similarity Detector test. 
+ """ + # load trained network + ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get mnist data + data_list = "./MNIST_unzip/test" + batch_size = 1000 + ds = generate_mnist_dataset(data_list, batch_size=batch_size) + model = ModelToBeAttacked(net) + + batch_num = 10 # the number of batches of input samples + all_images = [] + true_labels = [] + predict_labels = [] + i = 0 + for data in ds.create_tuple_iterator(): + i += 1 + images = data[0].astype(np.float32) + labels = data[1] + all_images.append(images) + true_labels.append(labels) + pred_labels = np.argmax(model.predict(images), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + all_images = np.concatenate(all_images) + true_labels = np.concatenate(true_labels) + predict_labels = np.concatenate(predict_labels) + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) + + train_images = all_images[0:6000, :, :, :] + attacked_images = all_images[0:10, :, :, :] + attacked_labels = true_labels[0:10] + + # generate malicious query sequence of black attack + attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=True, + t_max=1000) + success_list, adv_data, query_list = attack.generate(attacked_images, + attacked_labels) + LOGGER.info(TAG, 'pso attack success_list: %s', success_list) + LOGGER.info(TAG, 'average of query counts is : %s', np.mean(query_list)) + pred_logits_adv = model.predict(adv_data) + # rescale predict confidences into (0, 1). + pred_logits_adv = softmax(pred_logits_adv, axis=1) + pred_lables_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_lables_adv, attacked_labels)) + LOGGER.info(TAG, "prediction accuracy after attacking is : %g", + accuracy_adv) + + benign_queries = all_images[6000:10000, :, :, :] + suspicious_queries = model.get_queries() + + # explicit threshold not provided, calculate threshold for K + encoder = Model(EncoderNet(encode_dim=256)) + detector = SimilarityDetector(max_k_neighbor=50, trans_model=encoder) + detector.fit(inputs=train_images) + + # test benign queries + detector.detect(benign_queries) + fpr = len(detector.get_detected_queries()) / benign_queries.shape[0] + LOGGER.info(TAG, 'Number of false positive of attack detector is : %s', + len(detector.get_detected_queries())) + LOGGER.info(TAG, 'False positive rate of attack detector is : %s', fpr) + + # test attack queries + detector.clear_buffer() + detector.detect(suspicious_queries) + LOGGER.info(TAG, 'Number of detected attack queries is : %s', + len(detector.get_detected_queries())) + LOGGER.info(TAG, 'The detected attack query indexes are : %s', + detector.get_detected_queries()) + + +if __name__ == '__main__': + test_similarity_detector() diff --git a/example/mnist_demo/mnist_train.py b/example/mnist_demo/mnist_train.py new file mode 100644 index 0000000..d9ef839 --- /dev/null +++ b/example/mnist_demo/mnist_train.py @@ -0,0 +1,88 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import os +import sys + +import mindspore.nn as nn +from mindspore import context, Tensor +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.train import Model +import mindspore.ops.operations as P +from mindspore.nn.metrics import Accuracy +from mindspore.ops import functional as F +from mindspore.common import dtype as mstype + +from mindarmour.utils.logger import LogUtil + +from lenet5_net import LeNet5 + +sys.path.append("..") +from data_processing import generate_mnist_dataset +LOGGER = LogUtil.get_instance() +TAG = 'Lenet5_train' + + +class CrossEntropyLoss(nn.Cell): + """ + Define loss for network + """ + def __init__(self): + super(CrossEntropyLoss, self).__init__() + self.cross_entropy = P.SoftmaxCrossEntropyWithLogits() + self.mean = P.ReduceMean() + self.one_hot = P.OneHot() + self.on_value = Tensor(1.0, mstype.float32) + self.off_value = Tensor(0.0, mstype.float32) + + def construct(self, logits, label): + label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value) + loss = self.cross_entropy(logits, label)[0] + loss = self.mean(loss, (-1,)) + return loss + + +def mnist_train(epoch_size, batch_size, lr, momentum): + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", + enable_mem_reuse=False) + + lr = lr + momentum = momentum + epoch_size = epoch_size + mnist_path = "./MNIST_unzip/" + ds = generate_mnist_dataset(os.path.join(mnist_path, "train"), + batch_size=batch_size, repeat_size=1) + + network = LeNet5() + network.set_train() + net_loss = CrossEntropyLoss() + net_opt = nn.Momentum(network.trainable_params(), lr, momentum) + config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10) + ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", directory='./trained_ckpt_file/', config=config_ck) + model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) + + LOGGER.info(TAG, "============== Starting Training ==============") + model.train(epoch_size, ds, callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False) # train + + LOGGER.info(TAG, "============== Starting Testing ==============") + param_dict = load_checkpoint("trained_ckpt_file/checkpoint_lenet-10_1875.ckpt") + load_param_into_net(network, param_dict) + ds_eval = generate_mnist_dataset(os.path.join(mnist_path, "test"), batch_size=batch_size) + acc = model.eval(ds_eval) + LOGGER.info(TAG, "============== Accuracy: %s ==============", acc) + + +if __name__ == '__main__': + mnist_train(10, 32, 0.001, 0.9) diff --git a/mindarmour/__init__.py b/mindarmour/__init__.py new file mode 100644 index 0000000..65069e4 --- /dev/null +++ b/mindarmour/__init__.py @@ -0,0 +1,13 @@ +""" +MindArmour, a tool box of MindSpore to enhance model security and +trustworthiness against adversarial examples. 
+""" +from .attacks import Attack +from .attacks.black.black_model import BlackModel +from .defenses.defense import Defense +from .detectors.detector import Detector + +__all__ = ['Attack', + 'BlackModel', + 'Detector', + 'Defense'] diff --git a/mindarmour/attacks/__init__.py b/mindarmour/attacks/__init__.py new file mode 100644 index 0000000..11072ea --- /dev/null +++ b/mindarmour/attacks/__init__.py @@ -0,0 +1,39 @@ +""" +This module includes classical black-box and white-box attack algorithms +in making adversarial examples. +""" +from .gradient_method import * +from .iterative_gradient_method import * +from .deep_fool import DeepFool +from .jsma import JSMAAttack +from .carlini_wagner import CarliniWagnerL2Attack +from .lbfgs import LBFGS +from . import black +from .black.hop_skip_jump_attack import HopSkipJumpAttack +from .black.genetic_attack import GeneticAttack +from .black.natural_evolutionary_strategy import NES +from .black.pointwise_attack import PointWiseAttack +from .black.pso_attack import PSOAttack +from .black.salt_and_pepper_attack import SaltAndPepperNoiseAttack + +__all__ = ['FastGradientMethod', + 'RandomFastGradientMethod', + 'FastGradientSignMethod', + 'RandomFastGradientSignMethod', + 'LeastLikelyClassMethod', + 'RandomLeastLikelyClassMethod', + 'IterativeGradientMethod', + 'BasicIterativeMethod', + 'MomentumIterativeMethod', + 'ProjectedGradientDescent', + 'DeepFool', + 'CarliniWagnerL2Attack', + 'JSMAAttack', + 'LBFGS', + 'GeneticAttack', + 'HopSkipJumpAttack', + 'NES', + 'PointWiseAttack', + 'PSOAttack', + 'SaltAndPepperNoiseAttack' + ] diff --git a/mindarmour/attacks/attack.py b/mindarmour/attacks/attack.py new file mode 100644 index 0000000..0758a75 --- /dev/null +++ b/mindarmour/attacks/attack.py @@ -0,0 +1,97 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base Class of Attack. +""" +from abc import abstractmethod + +import numpy as np + +from mindarmour.utils._check_param import check_pair_numpy_param, \ + check_int_positive +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'Attack' + + +class Attack: + """ + The abstract base class for all attack classes creating adversarial examples. + """ + def __init__(self): + pass + + def batch_generate(self, inputs, labels, batch_size=64): + """ + Generate adversarial examples in batch, based on input samples and + their labels. + + Args: + inputs (numpy.ndarray): Samples based on which adversarial + examples are generated. + labels (numpy.ndarray): Labels of samples, whose values determined + by specific attacks. + batch_size (int): The number of samples in one batch. 
+ + Returns: + numpy.ndarray, generated adversarial examples + + Examples: + >>> inputs = Tensor([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]]) + >>> labels = [3, 0] + >>> advs = attack.batch_generate(inputs, labels, batch_size=2) + """ + arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels) + len_x = arr_x.shape[0] + batch_size = check_int_positive('batch_size', batch_size) + batchs = int(len_x / batch_size) + rest = len_x - batchs*batch_size + res = [] + for i in range(batchs): + x_batch = arr_x[i*batch_size: (i + 1)*batch_size] + y_batch = arr_y[i*batch_size: (i + 1)*batch_size] + adv_x = self.generate(x_batch, y_batch) + # Black-attack methods will return 3 values, just get the second. + res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x) + + if rest != 0: + x_batch = arr_x[batchs*batch_size:] + y_batch = arr_y[batchs*batch_size:] + adv_x = self.generate(x_batch, y_batch) + # Black-attack methods will return 3 values, just get the second. + res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x) + + + adv_x = np.concatenate(res, axis=0) + return adv_x + + @abstractmethod + def generate(self, inputs, labels): + """ + Generate adversarial examples based on normal samples and their labels. + + Args: + inputs (numpy.ndarray): Samples based on which adversarial + examples are generated. + labels (numpy.ndarray): Labels of samples, whose values determined + by specific attacks. + + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function generate() is an abstract function in class ' \ + '`Attack` and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) diff --git a/mindarmour/attacks/black/__init__.py b/mindarmour/attacks/black/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mindarmour/attacks/black/black_model.py b/mindarmour/attacks/black/black_model.py new file mode 100644 index 0000000..39572fb --- /dev/null +++ b/mindarmour/attacks/black/black_model.py @@ -0,0 +1,75 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Black model. +""" +from abc import abstractmethod + +import numpy as np + +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'BlackModel' + + +class BlackModel: + """ + The abstract class which treats the target model as a black box. The model + should be defined by users. + """ + def __init__(self): + pass + + @abstractmethod + def predict(self, inputs): + """ + Predict using the user specified model. The shape of predict results + should be (m, n), where n represents the number of classes this model + classifies. + + Args: + inputs (numpy.ndarray): The input samples to be predicted. + + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function predict() is an abstract function in class ' \ + '`BlackModel` and should be implemented in child class by user.' 
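Every black-box attack and detector in this patch only needs the numpy-in / numpy-out predict() described in the docstring above; is_adversarial() is inherited from BlackModel and defined just below. A minimal toy wrapper is sketched here for orientation: ToyBlackModel, its fixed random weights and the 28*28 input size are illustrative assumptions, not part of the library.

import numpy as np

from mindarmour.attacks.black.black_model import BlackModel


class ToyBlackModel(BlackModel):
    """Toy stand-in for a trained network (illustrative only)."""

    def __init__(self, num_class=10, seed=0):
        super(ToyBlackModel, self).__init__()
        # a fixed random projection keeps predictions deterministic per input
        self._weights = np.random.RandomState(seed).randn(28*28, num_class)

    def predict(self, inputs):
        # inputs must flatten to 28*28 features; output shape is (m, num_class)
        flat = np.reshape(inputs, (inputs.shape[0], -1))
        return flat.dot(self._weights)


if __name__ == '__main__':
    model = ToyBlackModel()
    sample = np.random.random((1, 1, 28, 28)).astype(np.float32)
    print(model.predict(sample).shape)                        # (1, 10)
    print(model.is_adversarial(sample[0], 3, is_targeted=False))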
+ LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + def is_adversarial(self, data, label, is_targeted): + """ + Check if input sample is adversarial example or not. + + Args: + data (numpy.ndarray): The input sample to be check, typically some + maliciously perturbed examples. + label (numpy.ndarray): For targeted attacks, label is intended + label of perturbed example. For untargeted attacks, label is + original label of corresponding unperturbed sample. + is_targeted (bool): For targeted/untargeted attacks, select True/False. + + Returns: + bool. + - If True, the input sample is adversarial. + + - If False, the input sample is not adversarial. + """ + logits = self.predict(np.expand_dims(data, axis=0))[0] + predicts = np.argmax(logits) + if is_targeted: + return predicts == label + return predicts != label diff --git a/mindarmour/attacks/black/genetic_attack.py b/mindarmour/attacks/black/genetic_attack.py new file mode 100644 index 0000000..6e2c403 --- /dev/null +++ b/mindarmour/attacks/black/genetic_attack.py @@ -0,0 +1,230 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Genetic-Attack. +""" +import numpy as np +from scipy.special import softmax + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils._check_param import check_numpy_param, check_model, \ + check_pair_numpy_param, check_param_type, check_value_positive, \ + check_int_positive, check_param_multi_types + + +LOGGER = LogUtil.get_instance() +TAG = 'GeneticAttack' + + +def _mutation(cur_pop, step_noise=0.01, prob=0.005): + """ + Generate mutation samples in genetic_attack. + + Args: + cur_pop (numpy.ndarray): Samples before mutation. + step_noise (float): Noise range. Default: 0.01. + prob (float): Mutation probability. Default: 0.005. + + Returns: + numpy.ndarray, samples after mutation operation in genetic_attack. + + Examples: + >>> mul_pop = self._mutation_op([0.2, 0.3, 0.4], step_noise=0.03, + >>> prob=0.01) + """ + cur_pop = check_numpy_param('cur_pop', cur_pop) + perturb_noise = np.clip(np.random.random(cur_pop.shape) - 0.5, + -step_noise, step_noise) + mutated_pop = perturb_noise*( + np.random.random(cur_pop.shape) < prob) + cur_pop + return mutated_pop + + +class GeneticAttack(Attack): + """ + The Genetic Attack represents the black-box attack based on the genetic algorithm, + which belongs to differential evolution algorithms. + + This attack was proposed by Moustafa Alzantot et al. (2018). + + References: `Moustafa Alzantot, Yash Sharma, Supriyo Chakraborty, + "GeneticAttack: Practical Black-box Attacks with + Gradient-FreeOptimization" `_ + + Args: + model (BlackModel): Target model. + pop_size (int): The number of particles, which should be greater than + zero. Default: 6. + mutation_rate (float): The probability of mutations. Default: 0.005. + per_bounds (float): Maximum L_inf distance. 
+ max_steps (int): The maximum round of iteration for each adversarial + example. Default: 1000. + step_size (float): Attack step size. Default: 0.2. + temp (float): Sampling temperature for selection. Default: 0.3. + bounds (tuple): Upper and lower bounds of data. In form of (clip_min, + clip_max). Default: (0, 1.0) + adaptive (bool): If True, turns on dynamic scaling of mutation + parameters. If false, turns on static mutation parameters. + Default: False. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: True. + + Examples: + >>> attack = GeneticAttack(model) + """ + def __init__(self, model, pop_size=6, + mutation_rate=0.005, per_bounds=0.15, max_steps=1000, + step_size=0.20, temp=0.3, bounds=(0, 1.0), adaptive=False, + sparse=True): + super(GeneticAttack, self).__init__() + self._model = check_model('model', model, BlackModel) + self._per_bounds = check_value_positive('per_bounds', per_bounds) + self._pop_size = check_int_positive('pop_size', pop_size) + self._step_size = check_value_positive('step_size', step_size) + self._temp = check_value_positive('temp', temp) + self._max_steps = check_int_positive('max_steps', max_steps) + self._mutation_rate = check_value_positive('mutation_rate', + mutation_rate) + self._adaptive = check_param_type('adaptive', adaptive, bool) + self._bounds = check_param_multi_types('bounds', bounds, [list, tuple]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + # initial global optimum fitness value + self._best_fit = -1 + # count times of no progress + self._plateau_times = 0 + # count times of changing attack step + self._adap_times = 0 + self._sparse = check_param_type('sparse', sparse, bool) + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input data and targeted + labels (or ground_truth labels). + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Targeted labels. + + Returns: + - numpy.ndarray, bool values for each attack result. + + - numpy.ndarray, generated adversarial examples. + + - numpy.ndarray, query times for each sample. 
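For orientation, here is a self-contained toy version of one generation step of the kind used inside generate() below: softmax selection of parents, uniform crossover and random mutation on flat vectors. All sizes and probabilities are made up, and the crossover probability is fixed at 0.5 here, whereas the implementation weights it by the parents' relative fitness.

import numpy as np
from scipy.special import softmax

rng = np.random.RandomState(0)
pop_size, dim, temp, mutate_prob, step_noise = 6, 8, 0.3, 0.05, 0.1

population = rng.random((pop_size, dim))      # current candidate perturbations
fitness = rng.random(pop_size)                # higher is better (toy values)

# 1. softmax selection: fitter members are more likely to be chosen as parents
probs = softmax(fitness / temp)
parents = rng.choice(pop_size, size=2*(pop_size - 1), replace=True, p=probs)
parent1 = population[parents[:pop_size - 1]]
parent2 = population[parents[pop_size - 1:]]

# 2. uniform crossover: each gene is copied from parent1 or parent2
mask = (rng.random(parent1.shape) > 0.5).astype(np.int32)
children = parent1*mask + parent2*(1 - mask)

# 3. mutation: perturb a random subset of genes with small noise
noise = np.clip(rng.random(children.shape) - 0.5, -step_noise, step_noise)
children = children + noise*(rng.random(children.shape) < mutate_prob)

# 4. elitism: the best member of the old population survives unchanged
elite = population[np.argmax(fitness)]
next_population = np.concatenate((children, elite[np.newaxis, :]))
print(next_population.shape)                  # (6, 8)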
+ + Examples: + >>> advs = attack.generate([[0.2, 0.3, 0.4], + >>> [0.3, 0.3, 0.2]], + >>> [1, 2]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + # if input is one-hot encoded, get sparse format value + if not self._sparse: + if labels.ndim != 2: + raise ValueError('labels must be 2 dims, ' + 'but got {} dims.'.format(labels.ndim)) + labels = np.argmax(labels, axis=1) + adv_list = [] + success_list = [] + query_times_list = [] + for i in range(inputs.shape[0]): + is_success = False + target_label = labels[i] + iters = 0 + x_ori = inputs[i] + # generate particles + ori_copies = np.repeat( + x_ori[np.newaxis, :], self._pop_size, axis=0) + # initial perturbations + cur_pert = np.clip(np.random.random(ori_copies.shape)*self._step_size, + (0 - self._per_bounds), + self._per_bounds) + query_times = 0 + while iters < self._max_steps: + iters += 1 + cur_pop = np.clip( + ori_copies + cur_pert, self._bounds[0], self._bounds[1]) + pop_preds = self._model.predict(cur_pop) + query_times += cur_pop.shape[0] + all_preds = np.argmax(pop_preds, axis=1) + success_pop = np.equal(target_label, all_preds).astype(np.int32) + success = max(success_pop) + if success == 1: + is_success = True + adv = cur_pop[np.argmax(success_pop)] + break + target_preds = pop_preds[:, target_label] + others_preds_sum = np.sum(pop_preds, axis=1) - target_preds + fit_vals = target_preds - others_preds_sum + best_fit = max(target_preds - np.max(pop_preds)) + if best_fit > self._best_fit: + self._best_fit = best_fit + self._plateau_times = 0 + else: + self._plateau_times += 1 + adap_threshold = (lambda z: 100 if z > -0.4 else 300)(best_fit) + if self._plateau_times > adap_threshold: + self._adap_times += 1 + self._plateau_times = 0 + if self._adaptive: + step_noise = max(self._step_size, 0.4*(0.9**self._adap_times)) + step_p = max(self._step_size, 0.5*(0.9**self._adap_times)) + else: + step_noise = self._step_size + step_p = self._mutation_rate + step_temp = self._temp + elite = cur_pert[np.argmax(fit_vals)] + select_probs = softmax(fit_vals/step_temp) + select_args = np.arange(self._pop_size) + parents_arg = np.random.choice( + a=select_args, size=2*(self._pop_size - 1), + replace=True, p=select_probs) + parent1 = cur_pert[parents_arg[:self._pop_size - 1]] + parent2 = cur_pert[parents_arg[self._pop_size - 1:]] + parent1_probs = select_probs[parents_arg[:self._pop_size - 1]] + parent2_probs = select_probs[parents_arg[self._pop_size - 1:]] + parent2_probs = parent2_probs / (parent1_probs + parent2_probs) + # duplicate the probabilities to all features of each particle. 
+ dims = len(x_ori.shape) + for _ in range(dims): + parent2_probs = parent2_probs[:, np.newaxis] + parent2_probs = np.tile(parent2_probs, ((1,) + x_ori.shape)) + cross_probs = (np.random.random(parent1.shape) > + parent2_probs).astype(np.int32) + childs = parent1*cross_probs + parent2*(1 - cross_probs) + mutated_childs = _mutation( + childs, step_noise=self._per_bounds*step_noise, + prob=step_p) + cur_pert = np.concatenate((mutated_childs, elite[np.newaxis, :])) + if is_success: + LOGGER.debug(TAG, 'successfully find one adversarial sample ' + 'and start Reduction process.') + adv_list.append(adv) + else: + LOGGER.debug(TAG, 'fail to find adversarial sample.') + adv_list.append(elite + x_ori) + LOGGER.debug(TAG, + 'iteration times is: %d and query times is: %d', + iters, + query_times) + success_list.append(is_success) + query_times_list.append(query_times) + del ori_copies, cur_pert, cur_pop + return np.asarray(success_list), \ + np.asarray(adv_list), \ + np.asarray(query_times_list) diff --git a/mindarmour/attacks/black/hop_skip_jump_attack.py b/mindarmour/attacks/black/hop_skip_jump_attack.py new file mode 100644 index 0000000..1bd4625 --- /dev/null +++ b/mindarmour/attacks/black/hop_skip_jump_attack.py @@ -0,0 +1,510 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Hop-skip-jump attack. +""" +import numpy as np + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + check_numpy_param, check_int_positive, check_value_positive, \ + check_value_non_negative, check_param_type + +LOGGER = LogUtil.get_instance() +TAG = 'HopSkipJumpAttack' + + +def _clip_image(image, clip_min, clip_max): + """ + Clip an image, or an image batch, with upper and lower threshold. + """ + return np.clip(image, clip_min, clip_max) + + +class HopSkipJumpAttack(Attack): + """ + HopSkipJumpAttack proposed by Chen, Jordan and Wainwright is a + decision-based attack. The attack requires access to output labels of + target model. + + References: `Chen J, Michael I. Jordan, Martin J. Wainwright. + HopSkipJumpAttack: A Query-Efficient Decision-Based Attack. 2019. + arXiv:1904.02144 `_ + + Args: + model (BlackModel): Target model. + init_num_evals (int): The initial number of evaluations for gradient + estimation. Default: 100. + max_num_evals (int): The maximum number of evaluations for gradient + estimation. Default: 1000. + stepsize_search (str): Indicating how to search for stepsize; Possible + values are 'geometric_progression', 'grid_search', 'geometric_progression'. + num_iterations (int): The number of iterations. Default: 64. + gamma (float): Used to set binary search threshold theta. Default: 1.0. + For l2 attack the binary search threshold `theta` is: + math:`gamma / d^{3/2}`. For linf attack is math:`gamma / d^2`. + constraint (str): The norm distance to optimize. 
Possible values are 'l2', + 'linf'. Default: l2. + batch_size (int): Batch size. Default: 32. + clip_min (float, optional): The minimum image component value. + Default: 0. + clip_max (float, optional): The maximum image component value. + Default: 1. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: True. + + Raises: + ValueError: If stepsize_search not in ['geometric_progression', + 'grid_search'] + ValueError: If constraint not in ['l2', 'linf'] + + Examples: + >>> x_test = np.asarray(np.random.random((sample_num, + >>> sample_length)), np.float32) + >>> y_test = np.random.randint(0, class_num, size=sample_num) + >>> instance = HopSkipJumpAttack(user_model) + >>> adv_x = instance.generate(x_test, y_test) + """ + + def __init__(self, model, init_num_evals=100, max_num_evals=1000, + stepsize_search='geometric_progression', num_iterations=20, + gamma=1.0, constraint='l2', batch_size=32, clip_min=0.0, + clip_max=1.0, sparse=True): + super(HopSkipJumpAttack, self).__init__() + self._model = check_model('model', model, BlackModel) + self._init_num_evals = check_int_positive('initial_num_evals', + init_num_evals) + self._max_num_evals = check_int_positive('max_num_evals', max_num_evals) + self._batch_size = check_int_positive('batch_size', batch_size) + self._clip_min = check_value_non_negative('clip_min', clip_min) + self._clip_max = check_value_non_negative('clip_max', clip_max) + self._sparse = check_param_type('sparse', sparse, bool) + self._np_dtype = np.dtype('float32') + if stepsize_search in ['geometric_progression', 'grid_search']: + self._stepsize_search = stepsize_search + else: + msg = "stepsize_search must be in ['geometric_progression'," \ + " 'grid_search'], but got {}".format(stepsize_search) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + self._num_iterations = check_int_positive('num_iterations', + num_iterations) + self._gamma = check_value_positive('gamma', gamma) + if constraint in ['l2', 'linf']: + self._constraint = constraint + else: + msg = "constraint must be in ['l2', 'linf'], " \ + "but got {}".format(constraint) + LOGGER.error(TAG, msg) + raise ValueError(msg) + self.queries = 0 + self.is_adv = True + self.y_targets = None + self.image_targets = None + self.y_target = None + self.image_target = None + + def _generate_one(self, sample): + """ + Return a tensor that constructs adversarial examples for the given + input. + + Args: + sample (Tensor): Input samples. + + Returns: + Tensor, generated adversarial examples. + """ + shape = list(np.shape(sample)) + dim = int(np.prod(shape)) + + # Set binary search threshold. + if self._constraint == 'l2': + theta = self._gamma / (np.sqrt(dim)*dim) + else: + theta = self._gamma / (dim*dim) + + wrap = self._hsja(sample, self.y_target, self.image_target, dim, theta) + if wrap is None: + self.is_adv = False + else: + self.is_adv = True + return self.is_adv, wrap, self.queries + + def set_target_images(self, target_images): + """ + Setting target images for target attack. + + Args: + target_images (numpy.ndarray): Target images. + """ + self.image_targets = check_numpy_param('target_images', target_images) + + def generate(self, inputs, labels): + """ + Generate adversarial images in a for loop. + + Args: + inputs (numpy.ndarray): Origin images. + labels (numpy.ndarray): Target labels. + + Returns: + - numpy.ndarray, bool values for each attack result. + + - numpy.ndarray, generated adversarial examples. + + - numpy.ndarray, query times for each sample. 
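A small worked example of the binary-search tolerance described in the class docstring above: theta shrinks with the input dimension d as gamma / d^(3/2) for l2 and gamma / d^2 for linf. The image shape and the toy decision oracle below are illustrative only.

import numpy as np

gamma = 1.0
shape = (1, 28, 28)                     # e.g. a single-channel 28x28 image
dim = int(np.prod(shape))               # 784

theta_l2 = gamma / (np.sqrt(dim)*dim)   # ~4.6e-05
theta_linf = gamma / (dim*dim)          # ~1.6e-06
print(theta_l2, theta_linf)

# The only feedback the attack uses is a boolean oracle; for an untargeted
# attack it reduces to "does the predicted label change":
def on_adversarial_side(scores, original_label):
    # scores: (m, num_class) model outputs for m candidate images
    return np.argmax(scores, axis=1) != original_label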
+ + Examples: + >>> generate([[0.1,0.2,0.2],[0.2,0.3,0.4]],[2,6]) + """ + if labels is not None: + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + + if not self._sparse: + labels = np.argmax(labels, axis=1) + x_adv = [] + is_advs = [] + queries_times = [] + + if labels is not None: + self.y_targets = labels + + for i, x_single in enumerate(inputs): + self.queries = 0 + if self.image_targets is not None: + self.image_target = self.image_targets[i] + if self.y_targets is not None: + self.y_target = self.y_targets[i] + is_adv, adv_img, query_time = self._generate_one(x_single) + x_adv.append(adv_img) + is_advs.append(is_adv) + queries_times.append(query_time) + + return np.asarray(is_advs), \ + np.asarray(x_adv), \ + np.asarray(queries_times) + + def _hsja(self, sample, target_label, target_image, dim, theta): + """ + The main algorithm for HopSkipJumpAttack. + + Args: + sample (numpy.ndarray): Input image. Without the batchsize + dimension. + target_label (int): Integer for targeted attack, None for + nontargeted attack. Without the batchsize dimension. + target_image (numpy.ndarray): An array with the same size as + input sample, or None. Without the batchsize dimension. + + Returns: + numpy.ndarray, perturbed images. + """ + original_label = None + # Original label for untargeted attack. + if target_label is None: + original_label = self._model.predict(sample) + original_label = np.argmax(original_label) + + # Initialize perturbed image. + # untarget attack + if target_image is None: + perturbed = self._initialize(sample, original_label, target_label) + if perturbed is None: + msg = 'Can not find an initial adversarial example' + LOGGER.info(TAG, msg) + return perturbed + else: + # Target attack + perturbed = target_image + + # Project the initial perturbed image to the decision boundary. + perturbed, dist_post_update = self._binary_search_batch(sample, + np.expand_dims(perturbed, 0), + original_label, + target_label, + theta) + + # Calculate the distance of perturbed image and original sample + dist = self._compute_distance(perturbed, sample) + for j in np.arange(self._num_iterations): + current_iteration = j + 1 + + # Select delta. + delta = self._select_delta(dist_post_update, current_iteration, dim, + theta) + # Choose number of evaluations. + num_evals = int(min([self._init_num_evals*np.sqrt(j + 1), + self._max_num_evals])) + + # approximate gradient. + gradf = self._approximate_gradient(perturbed, num_evals, + original_label, target_label, + delta, theta) + if self._constraint == 'linf': + update = np.sign(gradf) + else: + update = gradf + + # search step size. + if self._stepsize_search == 'geometric_progression': + # find step size. + epsilon = self._geometric_progression_for_stepsize( + perturbed, + update, + dist, + current_iteration, + original_label, + target_label) + # Update the sample. + perturbed = _clip_image(perturbed + epsilon*update, + self._clip_min, self._clip_max) + + # Binary search to return to the boundary. 
+ perturbed, dist_post_update = self._binary_search_batch( + sample, + perturbed[None], + original_label, + target_label, + theta) + + elif self._stepsize_search == 'grid_search': + epsilons = np.logspace(-4, 0, num=20, endpoint=True)*dist + epsilons_shape = [20] + len(np.shape(sample))*[1] + perturbeds = perturbed + epsilons.reshape( + epsilons_shape)*update + perturbeds = _clip_image(perturbeds, self._clip_min, + self._clip_max) + idx_perturbed = self._decision_function(perturbeds, + original_label, + target_label) + + if np.sum(idx_perturbed) > 0: + # Select the perturbation that yields the minimum distance + # after binary search. + perturbed, dist_post_update = self._binary_search_batch( + sample, perturbeds[idx_perturbed], + original_label, target_label, theta) + + # compute new distance. + dist = self._compute_distance(perturbed, sample) + + LOGGER.debug(TAG, + 'iteration: %d, %s distance %4f', + j + 1, + self._constraint, dist) + + perturbed = np.expand_dims(perturbed, 0) + return perturbed + + def _decision_function(self, images, original_label, target_label): + """ + Decision function returns 1 if the input sample is on the desired + side of the boundary, and 0 otherwise. + """ + images = _clip_image(images, self._clip_min, self._clip_max) + prob = [] + self.queries += len(images) + for i in range(0, len(images), self._batch_size): + batch = images[i:i + self._batch_size] + length = len(batch) + prob_i = self._model.predict(batch)[:length] + prob.append(prob_i) + prob = np.concatenate(prob) + if target_label is None: + res = np.argmax(prob, axis=1) != original_label + else: + res = np.argmax(prob, axis=1) == target_label + return res + + def _compute_distance(self, original_img, perturbation_img): + """ + Compute the distance between original image and perturbation images. + """ + if self._constraint == 'l2': + distance = np.linalg.norm(original_img - perturbation_img) + else: + distance = np.max(abs(original_img - perturbation_img)) + return distance + + def _approximate_gradient(self, sample, num_evals, original_label, + target_label, delta, theta): + """ + Gradient direction estimation. + """ + # Generate random noise based on constraint. + noise_shape = [num_evals] + list(np.shape(sample)) + if self._constraint == 'l2': + random_noise = np.random.randn(*noise_shape) + else: + random_noise = np.random.uniform(low=-1, high=1, size=noise_shape) + axis = tuple(range(1, 1 + len(np.shape(sample)))) + random_noise = random_noise / np.sqrt( + np.sum(random_noise**2, axis=axis, keepdims=True)) + + # perturbed images + perturbed = sample + delta*random_noise + perturbed = _clip_image(perturbed, self._clip_min, self._clip_max) + random_noise = (perturbed - sample) / theta + + # Whether the perturbed images are on the desired side of the boundary. + decisions = self._decision_function(perturbed, original_label, + target_label) + decision_shape = [len(decisions)] + [1]*len(np.shape(sample)) + # transform decisions value from 1, 0 to 1, -2 + re_decision = 2*np.array(decisions).astype(self._np_dtype).reshape( + decision_shape) - 1.0 + + if np.mean(re_decision) == 1.0: + grad_direction = np.mean(random_noise, axis=0) + elif np.mean(re_decision) == -1.0: + grad_direction = - np.mean(random_noise, axis=0) + else: + re_decision = re_decision - np.mean(re_decision) + grad_direction = np.mean(re_decision*random_noise, axis=0) + + # The gradient direction. 
+ grad_direction = grad_direction / (np.linalg.norm(grad_direction) + 1e-10) + + return grad_direction + + def _project(self, original_image, perturbed_images, alphas): + """ + Projection input samples onto given l2 or linf balls. + """ + alphas_shape = [len(alphas)] + [1]*len(np.shape(original_image)) + alphas = alphas.reshape(alphas_shape) + if self._constraint == 'l2': + projected = (1 - alphas)*original_image + alphas*perturbed_images + else: + projected = _clip_image(perturbed_images, original_image - alphas, + original_image + alphas) + + return projected + + def _binary_search_batch(self, original_image, perturbed_images, + original_label, target_label, theta): + """ + Binary search to approach the model decision boundary. + """ + + # Compute distance between perturbed image and original image. + dists_post_update = np.array([self._compute_distance(original_image, + perturbed_image,) + for perturbed_image in perturbed_images]) + + # Get higher thresholds + if self._constraint == 'l2': + highs = np.ones(len(perturbed_images)) + thresholds = theta + else: + highs = dists_post_update + thresholds = np.minimum(dists_post_update*theta, theta) + + # Get lower thresholds + lows = np.zeros(len(perturbed_images)) + + # Update thresholds. + while np.max((highs - lows) / thresholds) > 1: + mids = (highs + lows) / 2.0 + mid_images = self._project(original_image, perturbed_images, mids) + decisions = self._decision_function(mid_images, original_label, + target_label) + lows = np.where(decisions == [0], mids, lows) + highs = np.where(decisions == [1], mids, highs) + + out_images = self._project(original_image, perturbed_images, highs) + + # Select the best choice based on the distance of the output image. + dists = np.array( + [self._compute_distance(original_image, out_image) for out_image in + out_images]) + idx = np.argmin(dists) + + dist = dists_post_update[idx] + out_image = out_images[idx] + return out_image, dist + + def _initialize(self, sample, original_label, target_label): + """ + Implementation of BlendedUniformNoiseAttack + """ + num_evals = 0 + + while True: + random_noise = np.random.uniform(self._clip_min, self._clip_max, + size=np.shape(sample)) + success = self._decision_function(random_noise[None], + original_label, + target_label) + if success: + break + num_evals += 1 + + if num_evals > 1e3: + return None + + # Binary search. + low = 0.0 + high = 1.0 + while high - low > 0.001: + mid = (high + low) / 2.0 + blended = (1 - mid)*sample + mid*random_noise + success = self._decision_function(blended[None], original_label, + target_label) + if success: + high = mid + else: + low = mid + + initialization = (1 - high)*sample + high*random_noise + return initialization + + def _geometric_progression_for_stepsize(self, perturbed, update, dist, + current_iteration, original_label, + target_label): + """ + Search for stepsize in the way of Geometric progression. + Keep decreasing stepsize by half until reaching the desired side of + the decision boundary. + """ + epsilon = dist / np.sqrt(current_iteration) + while True: + updated = perturbed + epsilon*update + success = self._decision_function(updated, original_label, + target_label) + if success: + break + epsilon = epsilon / 2.0 + + return epsilon + + def _select_delta(self, dist_post_update, current_iteration, dim, theta): + """ + Choose the delta based on the distance between the input sample + and the perturbed sample. 
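Both _initialize and _binary_search_batch above come down to the same bisection idea: blend a benign point with an adversarial point and tighten the blending coefficient until the result sits just on the adversarial side. A one-dimensional toy version, with a hand-made oracle standing in for the model:

def is_adversarial(x):
    # toy oracle: everything above 0.37 counts as adversarial
    return x > 0.37

benign, adversarial = 0.0, 1.0        # blend endpoints (toy values)
low, high = 0.0, 1.0                  # blending coefficients
while high - low > 0.001:
    mid = (high + low) / 2.0
    blended = (1 - mid)*benign + mid*adversarial
    if is_adversarial(blended):
        high = mid                    # still adversarial: move toward benign
    else:
        low = mid

boundary_point = (1 - high)*benign + high*adversarial
print(boundary_point)                 # ~0.37, just on the adversarial side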
+ """ + if current_iteration == 1: + delta = 0.1*(self._clip_max - self._clip_min) + else: + if self._constraint == 'l2': + delta = np.sqrt(dim)*theta*dist_post_update + else: + delta = dim*theta*dist_post_update + + return delta diff --git a/mindarmour/attacks/black/natural_evolutionary_strategy.py b/mindarmour/attacks/black/natural_evolutionary_strategy.py new file mode 100644 index 0000000..4f76135 --- /dev/null +++ b/mindarmour/attacks/black/natural_evolutionary_strategy.py @@ -0,0 +1,432 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Natural-evolutionary-strategy Attack. +""" +import time +import numpy as np +from scipy.special import softmax + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + check_numpy_param, check_int_positive, check_value_positive, check_param_type + + +LOGGER = LogUtil.get_instance() +TAG = 'NES' + + +def _one_hot(index, total): + arr = np.zeros((total)) + arr[index] = 1.0 + return arr + + +def _bound(image, epislon): + lower = np.clip(image - epislon, 0, 1) + upper = np.clip(image + epislon, 0, 1) + return lower, upper + + +class NES(Attack): + """ + The class is an implementation of the Natural Evolutionary Strategies Attack, + including three settings: Query-Limited setting, Partial-Information setting + and Label-Only setting. + + References: `Andrew Ilyas, Logan Engstrom, Anish Athalye, and Jessy Lin. + Black-box adversarial attacks with limited queries and information. In + ICML, July 2018 `_ + + Args: + model (BlackModel): Target model. + scene (str): Scene in 'Label_Only', 'Partial_Info' or + 'Query_Limit'. + max_queries (int): Maximum query numbers to generate an adversarial + example. Default: 500000. + top_k (int): For Partial-Info or Label-Only setting, indicating how + much (Top-k) information is available for the attacker. For + Query-Limited setting, this input should be set as -1. Default: -1. + num_class (int): Number of classes in dataset. Default: 10. + batch_size (int): Batch size. Default: 96. + epsilon (float): Maximum perturbation allowed in attack. Default: 0.3. + samples_per_draw (int): Number of samples draw in antithetic sampling. + Default: 96. + momentum (float): Momentum. Default: 0.9. + learning_rate (float): Learning rate. Default: 1e-2. + max_lr (float): Max Learning rate. Default: 1e-2. + min_lr (float): Min Learning rate. Default: 5e-5. + sigma (float): Step size of random noise. Default: 1e-3. + plateau_length (int): Length of plateau used in Annealing algorithm. + Default: 20. + plateau_drop (float): Drop of plateau used in Annealing algorithm. + Default: 2.0. + adv_thresh (float): Threshold of adversarial. Default: 0.15. + zero_iters (int): Number of points to use for the proxy score. + Default: 10. + starting_eps (float): Starting epsilon used in Label-Only setting. + Default: 1.0. 
+ starting_delta_eps (float): Delta epsilon used in Label-Only setting. + Default: 0.5. + label_only_sigma (float): Sigma used in Label-Only setting. + Default: 1e-3. + conservative (int): Conservation used in epsilon decay, it will + increase if no convergence. Default: 2. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: True. + + Examples: + >>> SCENE = 'Label_Only' + >>> TOP_K = 5 + >>> num_class = 5 + >>> nes_instance = NES(user_model, SCENE, top_k=TOP_K) + >>> initial_img = np.asarray(np.random.random((32, 32)), np.float32) + >>> target_image = np.asarray(np.random.random((32, 32)), np.float32) + >>> orig_class = 0 + >>> target_class = 2 + >>> nes_instance.set_target_images(target_image) + >>> tag, adv, queries = nes_instance.generate([initial_img], [target_class]) + """ + + def __init__(self, model, scene, max_queries=10000, top_k=-1, num_class=10, + batch_size=128, epsilon=0.3, samples_per_draw=128, + momentum=0.9, learning_rate=1e-3, max_lr=5e-2, min_lr=5e-4, + sigma=1e-3, plateau_length=20, plateau_drop=2.0, + adv_thresh=0.25, zero_iters=10, starting_eps=1.0, + starting_delta_eps=0.5, label_only_sigma=1e-3, conservative=2, + sparse=True): + super(NES, self).__init__() + self._model = check_model('model', model, BlackModel) + self._scene = scene + + self._max_queries = check_int_positive('max_queries', max_queries) + self._num_class = check_int_positive('num_class', num_class) + self._batch_size = check_int_positive('batch_size', batch_size) + self._samples_per_draw = check_int_positive('samples_per_draw', + samples_per_draw) + self._goal_epsilon = check_value_positive('epsilon', epsilon) + self._momentum = check_value_positive('momentum', momentum) + self._learning_rate = check_value_positive('learning_rate', + learning_rate) + self._max_lr = check_value_positive('max_lr', max_lr) + self._min_lr = check_value_positive('min_lr', min_lr) + self._sigma = check_value_positive('sigma', sigma) + self._plateau_length = check_int_positive('plateau_length', + plateau_length) + self._plateau_drop = check_value_positive('plateau_drop', plateau_drop) + # partial information arguments + self._k = top_k + self._adv_thresh = check_value_positive('adv_thresh', adv_thresh) + # label only arguments + self._zero_iters = check_int_positive('zero_iters', zero_iters) + self._starting_eps = check_value_positive('starting_eps', starting_eps) + self._starting_delta_eps = check_value_positive('starting_delta_eps', + starting_delta_eps) + self._label_only_sigma = check_value_positive('label_only_sigma', + label_only_sigma) + self._conservative = check_int_positive('conservative', conservative) + self._sparse = check_param_type('sparse', sparse, bool) + self.target_imgs = None + self.target_img = None + self.target_class = None + + def generate(self, inputs, labels): + """ + Main algorithm for NES. + + Args: + inputs (numpy.ndarray): Benign input samples. + labels (numpy.ndarray): Target labels. + + Returns: + - numpy.ndarray, bool values for each attack result. + + - numpy.ndarray, generated adversarial examples. + + - numpy.ndarray, query times for each sample. + + Raises: + ValueError: If the top_k less than 0 in Label-Only or Partial-Info + setting. + ValueError: If the target_imgs is None in Label-Only or + Partial-Info setting. 
+ ValueError: If scene is not in ['Label_Only', 'Partial_Info', + 'Query_Limit'] + + Examples: + >>> advs = attack.generate([[0.2, 0.3, 0.4], [0.3, 0.3, 0.2]], + >>> [1, 2]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + if not self._sparse: + labels = np.argmax(labels, axis=1) + + if self._scene == 'Label_Only' or self._scene == 'Partial_Info': + if self._k < 0: + msg = "In 'Label_Only' or 'Partial_Info' mode, " \ + "'top_k' must more than 0." + LOGGER.error(TAG, msg) + raise ValueError(msg) + if self.target_imgs is None: + msg = "In 'Label_Only' or 'Partial_Info' mode, " \ + "'target_imgs' must be set." + LOGGER.error(TAG, msg) + raise ValueError(msg) + + elif self._scene == 'Query_Limit': + self._k = self._num_class + else: + msg = "scene must be string in 'Label_Only', " \ + "'Partial_Info' or 'Query_Limit' " + LOGGER.error(TAG, msg) + raise ValueError(msg) + + is_advs = [] + advs = [] + queries = [] + for sample, label, target_img in zip(inputs, labels, self.target_imgs): + is_adv, adv, query = self._generate_one(sample, label, target_img) + is_advs.append(is_adv) + advs.append(adv) + queries.append(query) + + return is_advs, advs, queries + + def set_target_images(self, target_images): + """ + Set target samples for target attack. + + Args: + target_images (numpy.ndarray): Target samples for target attack. + """ + self.target_imgs = check_numpy_param('target_images', target_images) + + def _generate_one(self, origin_image, target_label, target_image): + """ + Main algorithm for NES. + + Args: + origin_image (numpy.ndarray): Benign input sample. + target_label (int): Target label. + + Returns: + - bool. + - If True: successfully make an adversarial example. + + - If False: unsuccessfully make an adversarial example. + + - numpy.ndarray, an adversarial example. + + - int, number of queries. 
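The loop that follows estimates gradients from loss values alone, using antithetic sampling (each noise vector is paired with its negation). Below is a self-contained sketch on a quadratic toy loss whose exact gradient is known, so the estimate can be checked; all constants are illustrative.

import numpy as np

rng = np.random.RandomState(0)
dim, batch_size, sigma = 16, 2048, 1e-3
x = rng.random(dim)

def loss_fn(points):
    # toy loss with known gradient 2*(x - 0.5)
    return np.sum((points - 0.5)**2, axis=-1)

# antithetic sampling: concatenate Gaussian noise with its negation
noise_pos = rng.randn(batch_size // 2, dim)
noise = np.concatenate((noise_pos, -noise_pos), axis=0)
eval_points = x + sigma*noise

# NES estimator: loss-weighted average of the noise, scaled by 1/sigma
losses = loss_fn(eval_points)
grad_est = np.mean(losses[:, np.newaxis]*noise, axis=0) / sigma

true_grad = 2*(x - 0.5)
cosine = grad_est.dot(true_grad) / (
    np.linalg.norm(grad_est)*np.linalg.norm(true_grad))
print(cosine)    # close to 1.0: the estimate points along the true gradient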
+ """ + self.target_class = target_label + origin_image = check_numpy_param('origin_image', origin_image) + self._epsilon = self._starting_eps + lower, upper = _bound(origin_image, self._epsilon) + goal_epsilon = self._goal_epsilon + delta_epsilon = self._starting_delta_eps + if self._scene == 'Label_Only' or self._scene == 'Partial_Info': + adv = target_image + else: + adv = origin_image.copy() + + # for backtracking and momentum + num_queries = 0 + gradient = 0 + last_ls = [] + max_iters = int(np.ceil(self._max_queries // self._samples_per_draw)) + for i in range(max_iters): + start = time.time() + # early stop + eval_preds = self._model.predict(adv) + eval_preds = np.argmax(eval_preds, axis=1) + padv = np.equal(eval_preds, self.target_class) + if padv and self._epsilon <= goal_epsilon: + LOGGER.debug(TAG, 'early stopping at iteration %d', i) + return True, adv, num_queries + + # antithetic sampling noise + noise_pos = np.random.normal( + size=(self._batch_size // 2,) + origin_image.shape) + noise = np.concatenate((noise_pos, -noise_pos), axis=0) + eval_points = adv + self._sigma*noise + + prev_g = gradient + loss, gradient = self._get_grad(origin_image, eval_points, noise) + gradient = self._momentum*prev_g + (1.0 - self._momentum)*gradient + + # plateau learning rate annealing + last_ls.append(loss) + last_ls = self._plateau_annealing(last_ls) + + # search for learning rate and epsilon decay + current_lr = self._max_lr + prop_delta_eps = 0.0 + if loss < self._adv_thresh and self._epsilon > goal_epsilon: + prop_delta_eps = delta_epsilon + while current_lr >= self._min_lr: + # in partial information only or label only setting + if self._scene == 'Label_Only' or self._scene == 'Partial_Info': + proposed_epsilon = max(self._epsilon - prop_delta_eps, + goal_epsilon) + lower, upper = _bound(origin_image, proposed_epsilon) + proposed_adv = adv - current_lr*np.sign(gradient) + proposed_adv = np.clip(proposed_adv, lower, upper) + num_queries += 1 + + if self._preds_in_top_k(self.target_class, proposed_adv): + # The predicted label of proposed adversarial examples is in + # the top k observations. 
+ if prop_delta_eps > 0: + delta_epsilon = max(prop_delta_eps, 0.1) + last_ls = [] + adv = proposed_adv + self._epsilon = max( + self._epsilon - prop_delta_eps / self._conservative, + goal_epsilon) + break + elif current_lr >= self._min_lr*2: + current_lr = current_lr / 2 + LOGGER.debug(TAG, "backtracking learning rate to %.3f", + current_lr) + else: + prop_delta_eps = prop_delta_eps / 2 + if prop_delta_eps < 2e-3: + LOGGER.debug(TAG, "Did not converge.") + return False, adv, num_queries + current_lr = self._max_lr + LOGGER.debug(TAG, + "backtracking epsilon to %.3f", + self._epsilon - prop_delta_eps) + + # update the number of queries + if self._scene == 'Label_Only': + num_queries += self._samples_per_draw*self._zero_iters + else: + num_queries += self._samples_per_draw + LOGGER.debug(TAG, + 'Step %d: loss %.4f, lr %.2E, eps %.3f, time %.4f.', + i, + loss, + current_lr, + self._epsilon, + time.time() - start) + + return False, adv, num_queries + + def _plateau_annealing(self, last_loss): + last_loss = last_loss[-self._plateau_length:] + if last_loss[-1] > last_loss[0] and len( + last_loss) == self._plateau_length: + if self._max_lr > self._min_lr: + LOGGER.debug(TAG, "Annealing max learning rate.") + self._max_lr = max(self._max_lr / self._plateau_drop, + self._min_lr) + last_loss = [] + return last_loss + + def _softmax_cross_entropy_with_logit(self, logit): + logit = softmax(logit, axis=1) + onehot_label = np.zeros(self._num_class) + onehot_label[self.target_class] = 1 + onehot_labels = np.tile(onehot_label, (len(logit), 1)) + entropy = -onehot_labels*np.log(logit) + loss = np.mean(entropy, axis=1) + return loss + + def _query_limit_loss(self, eval_points, noise): + """ + Loss in Query-Limit setting. + """ + LOGGER.debug(TAG, 'enter the function _query_limit_loss().') + loss = self._softmax_cross_entropy_with_logit( + self._model.predict(eval_points)) + + return loss, noise + + def _partial_info_loss(self, eval_points, noise): + """ + Loss in Partial-Info setting. + """ + LOGGER.debug(TAG, 'enter the function _partial_info_loss.') + logit = self._model.predict(eval_points) + loss = np.sort(softmax(logit, axis=1))[:, -self._k:] + inds = np.argsort(logit)[:, -self._k:] + good_loss = np.where(np.equal(inds, self.target_class), loss, + np.zeros(np.shape(inds))) + good_loss = np.max(good_loss, axis=1) + losses = -np.log(good_loss) + return losses, noise + + def _label_only_loss(self, origin_image, eval_points, noise): + """ + Loss in Label-Only setting. 
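In the Query-Limit setting the quantity being minimised (computed by _softmax_cross_entropy_with_logit and _query_limit_loss above) is simply the softmax cross-entropy toward the target class, averaged over classes. A tiny standalone check with made-up logits:

import numpy as np
from scipy.special import softmax

num_class, target_class = 10, 3
logits = np.array([[2.0, 0.1, 0.3, 1.5, 0.0, 0.2, 0.1, 0.0, 0.4, 0.3],
                   [0.1, 0.2, 0.1, 4.0, 0.0, 0.3, 0.2, 0.1, 0.0, 0.1]])

probs = softmax(logits, axis=1)
onehot = np.zeros((len(logits), num_class))
onehot[np.arange(len(logits)), target_class] = 1.0

# per-sample cross-entropy toward the target class; minimising it raises
# the target class probability
loss = np.mean(-onehot*np.log(probs), axis=1)
print(loss)    # the second row already favours class 3, so its loss is smaller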
+ """ + LOGGER.debug(TAG, 'enter the function _label_only_loss().') + tiled_points = np.tile(np.expand_dims(eval_points, 0), + [self._zero_iters, + *[1]*len(eval_points.shape)]) + noised_eval_im = tiled_points \ + + np.random.randn(self._zero_iters, + self._batch_size, + *origin_image.shape) \ + *self._label_only_sigma + noised_eval_im = np.reshape(noised_eval_im, ( + self._zero_iters*self._batch_size, *origin_image.shape)) + logits = self._model.predict(noised_eval_im) + inds = np.argsort(logits)[:, -self._k:] + real_inds = np.reshape(inds, (self._zero_iters, self._batch_size, -1)) + rank_range = np.arange(1, self._k + 1, 1, dtype=np.float32) + tiled_rank_range = np.tile(np.reshape(rank_range, (1, 1, self._k)), + [self._zero_iters, self._batch_size, 1]) + batches_in = np.where(np.equal(real_inds, self.target_class), + tiled_rank_range, + np.zeros(np.shape(tiled_rank_range))) + loss = 1 - np.mean(batches_in) + return loss, noise + + def _preds_in_top_k(self, target_class, prop_adv_): + # query limit setting + if self._k == self._num_class: + return True + # label only and partial information setting + eval_preds = self._model.predict(prop_adv_) + if not target_class in eval_preds.argsort()[:, -self._k:]: + return False + return True + + def _get_grad(self, origin_image, eval_points, noise): + """Calculate gradient.""" + losses = [] + grads = [] + for _ in range(self._samples_per_draw // self._batch_size): + if self._scene == 'Label_Only': + loss, np_noise = self._label_only_loss(origin_image, + eval_points, + noise) + elif self._scene == 'Partial_Info': + loss, np_noise = self._partial_info_loss(eval_points, noise) + else: + loss, np_noise = self._query_limit_loss(eval_points, noise) + # only support three channel images + losses_tiled = np.tile(np.reshape(loss, (-1, 1, 1, 1)), + (1,) + origin_image.shape) + grad = np.mean(losses_tiled*np_noise, axis=0) / self._sigma + + grads.append(grad) + losses.append(np.mean(loss)) + return np.array(losses).mean(), np.mean(np.array(grads), axis=0) diff --git a/mindarmour/attacks/black/pointwise_attack.py b/mindarmour/attacks/black/pointwise_attack.py new file mode 100644 index 0000000..7d7c554 --- /dev/null +++ b/mindarmour/attacks/black/pointwise_attack.py @@ -0,0 +1,326 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Pointwise-Attack. 
+""" +import numpy as np + +from mindarmour.attacks.attack import Attack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.attacks.black.salt_and_pepper_attack import \ + SaltAndPepperNoiseAttack +from mindarmour.utils._check_param import check_model, check_pair_numpy_param, \ + check_int_positive, check_param_type +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'PointWiseAttack' + + +class PointWiseAttack(Attack): + """ + The Pointwise Attack make sure use the minimum number of changed pixels + to generate adversarial sample for each original sample.Those changed pixels + will use binary seach to make sure the distance between adversarial sample + and original sample is as close as possible. + + References: `L. Schott, J. Rauber, M. Bethge, W. Brendel: "Towards the + first adversarially robust neural network model on MNIST", ICLR (2019) + `_ + + Args: + model (BlackModel): Target model. + max_iter (int): Max rounds of iteration to generate adversarial image. + search_iter (int): Max rounds of binary search. + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + init_attack (Attack): Attack used to find a starting point. Default: + None. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: True. + + Examples: + >>> attack = PointWiseAttack(model) + """ + + def __init__(self, + model, + max_iter=1000, + search_iter=10, + is_targeted=False, + init_attack=None, + sparse=True): + super(PointWiseAttack, self).__init__() + self._model = check_model('model', model, BlackModel) + self._max_iter = check_int_positive('max_iter', max_iter) + self._search_iter = check_int_positive('search_iter', search_iter) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + if init_attack is None: + self._init_attack = SaltAndPepperNoiseAttack(model, + is_targeted=self._is_targeted) + else: + self._init_attack = init_attack + self._sparse = check_param_type('sparse', sparse, bool) + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input samples and targeted labels. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to create + adversarial examples. + labels (numpy.ndarray): For targeted attack, labels are adversarial + target labels. For untargeted attack, labels are ground-truth labels. + + Returns: + - numpy.ndarray, bool values for each attack result. + + - numpy.ndarray, generated adversarial examples. + + - numpy.ndarray, query times for each sample. 
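The optimisation that generate() delegates to (_decision_optimize, below) starts by trying to put perturbed pixels back to their original values one at a time, keeping a change only if the sample stays adversarial. A toy version of that first pass, with a hand-made oracle in place of the model; all values are illustrative.

import numpy as np

rng = np.random.RandomState(0)
original = np.zeros(8)
perturbed = original.copy()
perturbed[[1, 4, 6]] = 1.0                     # three pixels were changed

def is_adversarial(x):
    # toy oracle: the sample keeps fooling the "model" while pixel 4 is large
    return x[4] > 0.5

changed = np.flatnonzero(perturbed != original)
rng.shuffle(changed)
for idx in changed:
    candidate = perturbed.copy()
    candidate[idx] = original[idx]             # try restoring this pixel
    if is_adversarial(candidate):
        perturbed = candidate                  # keep the smaller perturbation

print(np.flatnonzero(perturbed != original))   # only pixel 4 stays perturbed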
+ + Examples: + >>> is_adv_list, adv_list, query_times_each_adv = attack.generate( + >>> [[0.1, 0.2, 0.6], [0.3, 0, 0.4]], + >>> [2, 3]) + """ + arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', + labels) + if not self._sparse: + arr_y = np.argmax(arr_y, axis=1) + ini_bool, ini_advs, ini_count = self._initialize_starting_point(arr_x, + arr_y) + is_adv_list = list() + adv_list = list() + query_times_each_adv = list() + for sample, sample_label, start_adv, ite_bool, ite_c in zip(arr_x, + arr_y, + ini_advs, + ini_bool, + ini_count): + if ite_bool: + LOGGER.info(TAG, 'Start optimizing.') + ori_label = np.argmax( + self._model.predict(np.expand_dims(sample, axis=0))[0]) + ini_label = np.argmax(self._model.predict(np.expand_dims(start_adv, axis=0))[0]) + is_adv, adv_x, query_times = self._decision_optimize(sample, + sample_label, + start_adv) + adv_label = np.argmax( + self._model.predict(np.expand_dims(adv_x, axis=0))[0]) + LOGGER.debug(TAG, 'before ini attack label is :{}'.format(ori_label)) + LOGGER.debug(TAG, 'after ini attack label is :{}'.format(ini_label)) + LOGGER.debug(TAG, 'INPUT optimize label is :{}'.format(sample_label)) + LOGGER.debug(TAG, 'after pointwise attack label is :{}'.format(adv_label)) + is_adv_list.append(is_adv) + adv_list.append(adv_x) + query_times_each_adv.append(query_times + ite_c) + else: + LOGGER.info(TAG, 'Initial sample is not adversarial, pass.') + is_adv_list.append(False) + adv_list.append(start_adv) + query_times_each_adv.append(ite_c) + is_adv_list = np.array(is_adv_list) + adv_list = np.array(adv_list) + query_times_each_adv = np.array(query_times_each_adv) + LOGGER.debug(TAG, 'ret list is: {}'.format(adv_list)) + return is_adv_list, adv_list, query_times_each_adv + + def _decision_optimize(self, unperturbed_img, input_label, perturbed_img): + """ + Make the perturbed samples more similar to unperturbed samples, + while maintaining the perturbed_label. + + Args: + unperturbed_img (numpy.ndarray): Input sample as reference to create + adversarial example. + input_label (numpy.ndarray): Input label. + perturbed_img (numpy.ndarray): Starting point to optimize. + + Returns: + numpy.ndarray, a generated adversarial example. + + Raises: + ValueError: if input unperturbed and perturbed samples have different size. 
+ """ + query_count = 0 + img_size = unperturbed_img.size + img_shape = unperturbed_img.shape + perturbed_img = perturbed_img.reshape(-1) + unperturbed_img = unperturbed_img.reshape(-1) + recover = np.copy(perturbed_img) + + if unperturbed_img.dtype != perturbed_img.dtype: + msg = 'unperturbed sample and perturbed sample must have the same' \ + ' dtype, but got dtype of unperturbed is: {}, dtype of perturbed ' \ + 'is: {}'.format(unperturbed_img.dtype, perturbed_img.dtype) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + LOGGER.debug(TAG, 'Before optimize, the mse distance between original ' + 'sample and adversarial sample is: {}' + .format(self._distance(perturbed_img, unperturbed_img))) + # recover pixel if image is adversarial + for _ in range(self._max_iter): + is_improve = False + # at the premise of adversarial feature, recover pixels + pixels_ind = np.arange(img_size) + mask = unperturbed_img != perturbed_img + np.random.shuffle(pixels_ind) + for ite_ind in pixels_ind: + if mask[ite_ind]: + recover[ite_ind] = unperturbed_img[ite_ind] + query_count += 1 + is_adv = self._model.is_adversarial( + recover.reshape(img_shape), input_label, self._is_targeted) + if is_adv: + is_improve = True + perturbed_img[ite_ind] = recover[ite_ind] + break + else: + recover[ite_ind] = perturbed_img[ite_ind] + if not is_improve or (self._distance( + perturbed_img, unperturbed_img) <= self._get_threthod()): + break + LOGGER.debug(TAG, 'first round: Query count {}'.format(query_count)) + LOGGER.debug(TAG, 'Starting binary searches.') + # tag the optimized pixels. + mask = unperturbed_img != perturbed_img + for _ in range(self._max_iter): + is_improve = False + pixels_ind = np.arange(img_size) + np.random.shuffle(pixels_ind) + for ite_ind in pixels_ind: + if not mask[ite_ind]: + continue + recover[ite_ind] = unperturbed_img[ite_ind] + query_count += 1 + is_adv = self._model.is_adversarial(recover.reshape(img_shape), + input_label, + self._is_targeted) + if is_adv: + is_improve = True + mask[ite_ind] = True + perturbed_img[ite_ind] = recover[ite_ind] + LOGGER.debug(TAG, + 'Reset {}th pixel value to original, ' + 'mse distance: {}.'.format( + ite_ind, + self._distance(perturbed_img, + unperturbed_img))) + break + else: + # use binary searches + optimized_value, b_query = self._binary_search( + perturbed_img, + unperturbed_img, + ite_ind, + input_label, img_shape) + query_count += b_query + if optimized_value != perturbed_img[ite_ind]: + is_improve = True + mask[ite_ind] = True + perturbed_img[ite_ind] = optimized_value + LOGGER.debug(TAG, + 'Reset {}th pixel value to original, ' + 'mse distance: {}.'.format( + ite_ind, + self._distance(perturbed_img, + unperturbed_img))) + break + if not is_improve or (self._distance( + perturbed_img, unperturbed_img) <= self._get_threthod()): + LOGGER.debug(TAG, 'second optimized finish.') + break + LOGGER.info(TAG, 'Optimized finished, query count is {}'.format(query_count)) + # this method use to optimized the adversarial sample + return True, perturbed_img.reshape(img_shape), query_count + + def _binary_search(self, perturbed_img, unperturbed_img, ite_ind, + input_label, img_shape): + """ + For original pixel of inputs, use binary search to get the nearest pixel + value with original value with adversarial feature. + + Args: + perturbed_img (numpy.ndarray): Adversarial sample. + unperturbed_img (numpy.ndarray): Input sample. + ite_ind (int): The index of pixel in inputs. + input_label (numpy.ndarray): Input labels. + img_shape (tuple): Shape of the original sample. 
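The per-pixel binary search documented above (and implemented next) narrows one pixel toward its original value for as long as the oracle still reports the sample as adversarial. A single-pixel toy version with a hand-made oracle; the threshold 0.42 is made up.

def pixel_binary_search(adv_value, non_adv_value, stays_adversarial,
                        search_iter=10):
    # shrink one pixel toward its benign value, keeping the last value
    # that still fools the toy oracle
    for _ in range(search_iter):
        mid = (adv_value + non_adv_value) / 2.0
        if stays_adversarial(mid):
            adv_value = mid
        else:
            non_adv_value = mid
    return adv_value

# toy oracle: this pixel must stay above 0.42 for the sample to stay adversarial
best = pixel_binary_search(1.0, 0.0, lambda v: v > 0.42)
print(best)    # a little above 0.42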
+ + Returns: + float, adversarial pixel value. + """ + query_count = 0 + adv_value = perturbed_img[ite_ind] + non_adv_value = unperturbed_img[ite_ind] + for _ in range(self._search_iter): + next_value = (adv_value + non_adv_value) / 2 + recover = np.copy(perturbed_img) + recover[ite_ind] = next_value + query_count += 1 + is_adversarial = self._model.is_adversarial( + recover.reshape(img_shape), input_label, self._is_targeted) + if is_adversarial: + adv_value = next_value + else: + non_adv_value = next_value + return adv_value, query_count + + def _initialize_starting_point(self, inputs, labels): + """ + Use init_attack to generate original adversarial inputs. + + Args: + inputs (numpy.ndarray): Benign input sample used as references to create + adversarial examples. + labels (numpy.ndarray): If is targeted attack, labels is adversarial + labels, if is untargeted attack, labels is true labels. + + Returns: + numpy.ndarray, adversarial image(s) generate by init_attack method. + """ + is_adv, start_adv, query_c = self._init_attack.generate(inputs, labels) + return is_adv, start_adv, query_c + + def _distance(self, perturbed_img, unperturbed_img): + """ + Calculate Mean Squared Error (MSE) to evaluate the optimized process. + + Args: + perturbed_img (numpy.ndarray): Adversarial sample to be optimized. + unperturbed_img (numpy.ndarray): As a reference benigh sample. + + Returns: + float, Calculation of Mean Squared Error (MSE). + """ + return np.square(np.subtract(perturbed_img, unperturbed_img)).mean() + + def _get_threthod(self, method='MSE'): + """ + Return a float number, when distance small than this number, + optimize will abort early. + + Args: + method: distance method. Default: MSE. + + Returns: + float, the optimized level, the smaller of number, the better + of adversarial sample. + """ + predefined_threshold = 0.01 + if method == 'MSE': + return predefined_threshold + return predefined_threshold diff --git a/mindarmour/attacks/black/pso_attack.py b/mindarmour/attacks/black/pso_attack.py new file mode 100644 index 0000000..7fdc812 --- /dev/null +++ b/mindarmour/attacks/black/pso_attack.py @@ -0,0 +1,302 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PSO-Attack. +""" +import numpy as np + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils._check_param import check_model, check_pair_numpy_param, \ + check_numpy_param, check_value_positive, check_int_positive, \ + check_param_type, check_equal_shape, check_param_multi_types + + +LOGGER = LogUtil.get_instance() +TAG = 'PSOAttack' + + +class PSOAttack(Attack): + """ + The PSO Attack represents the black-box attack based on Particle Swarm + Optimization algorithm, which belongs to differential evolution algorithms. + This attack was proposed by Rayan Mosli et al. (2019). 
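A hedged usage sketch of the class defined here, mirroring the earlier example script. ToyLinearModel is an illustrative stand-in for a user-supplied BlackModel subclass, the small t_max only keeps the sketch cheap, and none of the values are recommendations.

import numpy as np

from mindarmour.attacks import PSOAttack
from mindarmour.attacks.black.black_model import BlackModel


class ToyLinearModel(BlackModel):
    """Illustrative stand-in for a trained network."""
    def __init__(self, num_class=10, seed=1):
        super(ToyLinearModel, self).__init__()
        self._weights = np.random.RandomState(seed).randn(28*28, num_class)

    def predict(self, inputs):
        # (m, ...) inputs flattened to 28*28 features -> (m, num_class) scores
        return np.reshape(inputs, (inputs.shape[0], -1)).dot(self._weights)


model = ToyLinearModel()
attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, t_max=50, sparse=True)

images = np.asarray(np.random.random((2, 1, 28, 28)), np.float32)
labels = np.random.randint(0, 10, size=2)
success, adv_images, query_counts = attack.generate(images, labels)
print(success, query_counts)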
+ + References: `Rayan Mosli, Matthew Wright, Bo Yuan, Yin Pan, "They Might NOT + Be Giants: Crafting Black-Box Adversarial Examples with Fewer Queries + Using Particle Swarm Optimization", arxiv: 1909.07490, 2019. + `_ + + Args: + model (BlackModel): Target model. + step_size (float): Attack step size. Default: 0.5. + per_bounds (float): Relative variation range of perturbations. Default: 0.6. + c1 (float): Weight coefficient. Default: 2. + c2 (float): Weight coefficient. Default: 2. + c (float): Weight of perturbation loss. Default: 2. + pop_size (int): The number of particles, which should be greater + than zero. Default: 6. + t_max (int): The maximum round of iteration for each adversarial example, + which should be greater than zero. Default: 1000. + pm (float): The probability of mutations. Default: 0.5. + bounds (tuple): Upper and lower bounds of data. In form of (clip_min, + clip_max). Default: None. + targeted (bool): If True, turns on the targeted attack. If False, + turns on untargeted attack. Default: False. + reduction_iters (int): Cycle times in reduction process. Default: 3. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: True. + + Examples: + >>> attack = PSOAttack(model) + """ + + def __init__(self, model, step_size=0.5, per_bounds=0.6, c1=2.0, c2=2.0, + c=2.0, pop_size=6, t_max=1000, pm=0.5, bounds=None, + targeted=False, reduction_iters=3, sparse=True): + super(PSOAttack, self).__init__() + self._model = check_model('model', model, BlackModel) + self._step_size = check_value_positive('step_size', step_size) + self._per_bounds = check_value_positive('per_bounds', per_bounds) + self._c1 = check_value_positive('c1', c1) + self._c2 = check_value_positive('c2', c2) + self._c = check_value_positive('c', c) + self._pop_size = check_int_positive('pop_size', pop_size) + self._pm = check_value_positive('pm', pm) + self._bounds = check_param_multi_types('bounds', bounds, [list, tuple]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + self._targeted = check_param_type('targeted', targeted, bool) + self._t_max = check_int_positive('t_max', t_max) + self._reduce_iters = check_int_positive('reduction_iters', + reduction_iters) + self._sparse = check_param_type('sparse', sparse, bool) + + def _fitness(self, confi_ori, confi_adv, x_ori, x_adv): + """ + Calculate the fitness value for each particle. + + Args: + confi_ori (float): Maximum confidence or target label confidence of + the original benign inputs' prediction confidences. + confi_adv (float): Maximum confidence or target label confidence of + the adversarial samples' prediction confidences. + x_ori (numpy.ndarray): Benign samples. + x_adv (numpy.ndarray): Adversarial samples. + + Returns: + - float, fitness values of adversarial particles. + + - int, query times after reduction. + + Examples: + >>> fitness = self._fitness(2.4, 1.2, [0.2, 0.3, 0.1], [0.21, + >>> 0.34, 0.13]) + """ + x_ori = check_numpy_param('x_ori', x_ori) + x_adv = check_numpy_param('x_adv', x_adv) + fit_value = abs( + confi_ori - confi_adv) - self._c / self._pop_size*np.linalg.norm( + (x_adv - x_ori).reshape(x_adv.shape[0], -1), axis=1) + return fit_value + + def _mutation_op(self, cur_pop): + """ + Generate mutation samples. 
+        """
+        cur_pop = check_numpy_param('cur_pop', cur_pop)
+        perturb_noise = np.random.random(cur_pop.shape) - 0.5
+        mutated_pop = perturb_noise*(np.random.random(cur_pop.shape)
+                                     < self._pm) + cur_pop
+        mutated_pop = np.clip(mutated_pop, cur_pop*(1 - self._per_bounds),
+                              cur_pop*(1 + self._per_bounds))
+        return mutated_pop
+
+    def _reduction(self, x_ori, q_times, label, best_position):
+        """
+        Decrease the differences between the original samples and the
+        adversarial samples.
+
+        Args:
+            x_ori (numpy.ndarray): Original samples.
+            q_times (int): Query times.
+            label (int): Target label or ground-truth label.
+            best_position (numpy.ndarray): Adversarial examples.
+
+        Returns:
+            - numpy.ndarray, adversarial examples after reduction.
+
+            - int, total query times after reduction.
+
+        Examples:
+            >>> adv_reduction = self._reduction([0.1, 0.2, 0.3], 20, 1,
+            >>>                                 [0.12, 0.15, 0.25])
+        """
+        x_ori = check_numpy_param('x_ori', x_ori)
+        best_position = check_numpy_param('best_position', best_position)
+        x_ori, best_position = check_equal_shape('x_ori', x_ori,
+                                                 'best_position', best_position)
+        x_ori_fla = x_ori.flatten()
+        best_position_fla = best_position.flatten()
+        pixel_deep = self._bounds[1] - self._bounds[0]
+        nums_pixel = len(x_ori_fla)
+        for i in range(nums_pixel):
+            diff = x_ori_fla[i] - best_position_fla[i]
+            if abs(diff) > pixel_deep*0.1:
+                old_poi_fla = np.copy(best_position_fla)
+                best_position_fla[i] = np.clip(
+                    best_position_fla[i] + diff*0.5,
+                    self._bounds[0], self._bounds[1])
+                cur_label = np.argmax(
+                    self._model.predict(np.expand_dims(
+                        best_position_fla.reshape(x_ori.shape), axis=0))[0])
+                q_times += 1
+                if self._targeted:
+                    if cur_label != label:
+                        best_position_fla = old_poi_fla
+                else:
+                    if cur_label == label:
+                        best_position_fla = old_poi_fla
+        return best_position_fla.reshape(x_ori.shape), q_times
+
+    def generate(self, inputs, labels):
+        """
+        Generate adversarial examples based on input data and targeted
+        labels (or ground-truth labels).
+
+        Args:
+            inputs (numpy.ndarray): Input samples.
+            labels (numpy.ndarray): Targeted labels or ground-truth labels.
+
+        Returns:
+            - numpy.ndarray, bool values for each attack result.
+
+            - numpy.ndarray, generated adversarial examples.
+
+            - numpy.ndarray, query times for each sample.
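The fitness function defined earlier trades a confidence drop against an L2 penalty on the perturbation. A small NumPy sketch of the same computation, using the default c and pop_size and toy tensors (all values are illustrative):

import numpy as np

c, pop_size = 2.0, 6
confi_ori = 0.9                              # confidence on the benign sample
confi_adv = np.array([0.6, 0.8])             # per-particle confidence on candidates
x_ori = np.zeros((2, 1, 2, 2))               # benign sample, repeated per particle
x_adv = x_ori + 0.05                         # two candidate particles
# |confidence change| minus a scaled L2 norm of the flattened perturbation.
penalty = c / pop_size * np.linalg.norm(
    (x_adv - x_ori).reshape(x_adv.shape[0], -1), axis=1)
fit_value = np.abs(confi_ori - confi_adv) - penalty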
+ + Examples: + >>> advs = attack.generate([[0.2, 0.3, 0.4], [0.3, 0.3, 0.2]], + >>> [1, 2]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + if not self._sparse: + labels = np.argmax(labels, axis=1) + # generate one adversarial each time + if self._targeted: + target_labels = labels + adv_list = [] + success_list = [] + query_times_list = [] + pixel_deep = self._bounds[1] - self._bounds[0] + for i in range(inputs.shape[0]): + is_success = False + q_times = 0 + x_ori = inputs[i] + confidences = self._model.predict(np.expand_dims(x_ori, axis=0))[0] + q_times += 1 + true_label = labels[i] + if self._targeted: + t_label = target_labels[i] + confi_ori = confidences[t_label] + else: + confi_ori = max(confidences) + # step1, initializing + # initial global optimum fitness value, cannot set to be 0 + best_fitness = -np.inf + # initial global optimum position + best_position = x_ori + x_copies = np.repeat(x_ori[np.newaxis, :], self._pop_size, axis=0) + cur_noise = np.clip((np.random.random(x_copies.shape) - 0.5) + *self._step_size, + (0 - self._per_bounds)*(x_copies + 0.1), + self._per_bounds*(x_copies + 0.1)) + par = np.clip(x_copies + cur_noise, + x_copies*(1 - self._per_bounds), + x_copies*(1 + self._per_bounds)) + # initial advs + par_ori = np.copy(par) + # initial optimum positions for particles + par_best_poi = np.copy(par) + # initial optimum fitness values + par_best_fit = -np.inf*np.ones(self._pop_size) + # step2, optimization + # initial velocities for particles + v_particles = np.zeros(par.shape) + is_mutation = False + iters = 0 + while iters < self._t_max: + last_best_fit = best_fitness + ran_1 = np.random.random(par.shape) + ran_2 = np.random.random(par.shape) + v_particles = self._step_size*( + v_particles + self._c1*ran_1*(best_position - par)) \ + + self._c2*ran_2*(par_best_poi - par) + par = np.clip(par + v_particles, + (par_ori + 0.1*pixel_deep)*( + 1 - self._per_bounds), + (par_ori + 0.1*pixel_deep)*( + 1 + self._per_bounds)) + if iters > 30 and is_mutation: + par = self._mutation_op(par) + if self._targeted: + confi_adv = self._model.predict(par)[:, t_label] + else: + confi_adv = np.max(self._model.predict(par), axis=1) + q_times += self._pop_size + fit_value = self._fitness(confi_ori, confi_adv, x_ori, par) + for k in range(self._pop_size): + if fit_value[k] > par_best_fit[k]: + par_best_fit[k] = fit_value[k] + par_best_poi[k] = par[k] + if fit_value[k] > best_fitness: + best_fitness = fit_value[k] + best_position = par[k] + iters += 1 + cur_pre = self._model.predict(np.expand_dims(best_position, + axis=0))[0] + is_mutation = False + if (best_fitness - last_best_fit) < last_best_fit*0.05: + is_mutation = True + cur_label = np.argmax(cur_pre) + q_times += 1 + if self._targeted: + if cur_label == t_label: + is_success = True + else: + if cur_label != true_label: + is_success = True + if is_success: + LOGGER.debug(TAG, 'successfully find one adversarial ' + 'sample and start Reduction process') + # step3, reduction + if self._targeted: + best_position, q_times = self._reduction( + x_ori, q_times, t_label, best_position) + else: + best_position, q_times = self._reduction( + x_ori, q_times, true_label, best_position) + break + if not is_success: + LOGGER.debug(TAG, + 'fail to find adversarial sample, iteration ' + 'times is: %d and query times is: %d', + iters, + q_times) + adv_list.append(best_position) + success_list.append(is_success) + query_times_list.append(q_times) + del x_copies, cur_noise, par, par_ori, par_best_poi + return 
np.asarray(success_list), \ + np.asarray(adv_list), \ + np.asarray(query_times_list) diff --git a/mindarmour/attacks/black/salt_and_pepper_attack.py b/mindarmour/attacks/black/salt_and_pepper_attack.py new file mode 100644 index 0000000..c0ce75a --- /dev/null +++ b/mindarmour/attacks/black/salt_and_pepper_attack.py @@ -0,0 +1,166 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +SaltAndPepperNoise-Attack. +""" +import time + +import numpy as np + +from mindarmour.attacks.attack import Attack +from mindarmour.attacks.black.black_model import BlackModel +from mindarmour.utils._check_param import check_model, check_pair_numpy_param, \ + check_param_type, check_int_positive, check_param_multi_types +from mindarmour.utils._check_param import normalize_value +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'SaltAndPepperNoise-Attack' + + +class SaltAndPepperNoiseAttack(Attack): + """ + Increases the amount of salt and pepper noise to generate adversarial + samples. + + Args: + model (BlackModel): Target model. + bounds (tuple): Upper and lower bounds of data. In form of (clip_min, + clip_max). Default: (0.0, 1.0) + max_iter (int): Max iteration to generate an adversarial example. + Default: 100 + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: True. + + Examples: + >>> attack = SaltAndPepperNoiseAttack(model) + """ + + def __init__(self, model, bounds=(0.0, 1.0), max_iter=100, + is_targeted=False, sparse=True): + super(SaltAndPepperNoiseAttack, self).__init__() + self._model = check_model('model', model, BlackModel) + self._bounds = check_param_multi_types('bounds', bounds, [tuple, list]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + self._max_iter = check_int_positive('max_iter', max_iter) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + self._sparse = check_param_type('sparse', sparse, bool) + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input data and target labels. + + Args: + inputs (numpy.ndarray): The original, unperturbed inputs. + labels (numpy.ndarray): The target labels. + + Returns: + - numpy.ndarray, bool values for each attack result. + + - numpy.ndarray, generated adversarial examples. + + - numpy.ndarray, query times for each sample. 
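For orientation, a minimal usage sketch of the class defined above. `model` is assumed to be an existing BlackModel subclass wrapping the victim network (as in the MNIST demos of this patch); shapes and labels are placeholders:

import numpy as np

# `model` is an assumed BlackModel instance; see mindarmour/attacks/black/black_model.py.
attack = SaltAndPepperNoiseAttack(model, bounds=(0.0, 1.0), max_iter=100)
inputs = np.random.rand(2, 1, 32, 32).astype(np.float32)
labels = np.array([1, 7])                    # sparse labels (sparse=True by default)
is_adv, adv_batch, query_counts = attack.generate(inputs, labels)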
+ + Examples: + >>> adv_list = attack.generate(([[0.1, 0.2, 0.6], + >>> [0.3, 0, 0.4]], + >>> [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + >>> [0, , 0, 1, 0, 0, 0, 0, 0, 0, 0]]) + """ + arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', + labels) + if not self._sparse: + arr_y = np.argmax(arr_y, axis=1) + + is_adv_list = list() + adv_list = list() + query_times_each_adv = list() + for sample, label in zip(arr_x, arr_y): + start_t = time.time() + is_adv, perturbed, query_times = self._generate_one(sample, label) + is_adv_list.append(is_adv) + adv_list.append(perturbed) + query_times_each_adv.append(query_times) + LOGGER.info(TAG, 'Finished one sample, adversarial is {}, ' + 'cost time {:.2}s' + .format(is_adv, time.time() - start_t)) + is_adv_list = np.array(is_adv_list) + adv_list = np.array(adv_list) + query_times_each_adv = np.array(query_times_each_adv) + return is_adv_list, adv_list, query_times_each_adv + + def _generate_one(self, one_input, label, epsilons=10): + """ + Increases the amount of salt and pepper noise to generate adversarial + samples. + + Args: + one_input (numpy.ndarray): The original, unperturbed input. + label (numpy.ndarray): The target label. + epsilons (int) : Number of steps to try probability between 0 + and 1. Default: 10 + + Returns: + - numpy.ndarray, bool values for result. + + - numpy.ndarray, adversarial example. + + - numpy.ndarray, query times for this sample. + + Examples: + >>> one_adv = self._generate_one(input, label) + """ + # use binary search to get epsilons + low_ = 0.0 + high_ = 1.0 + query_count = 0 + input_shape = one_input.shape + input_dtype = one_input.dtype + one_input = one_input.reshape(-1) + depth = np.abs(np.subtract(self._bounds[0], self._bounds[1])) + best_adv = np.copy(one_input) + best_eps = high_ + find_adv = False + for _ in range(self._max_iter): + min_eps = low_ + max_eps = (low_ + high_) / 2 + for _ in range(epsilons): + adv = np.copy(one_input) + noise = np.random.uniform(low=low_, high=high_, size=one_input.size) + eps = (min_eps + max_eps) / 2 + # add salt + adv[noise < eps] = -depth + # add pepper + adv[noise >= (high_ - eps)] = depth + # normalized sample + adv = normalize_value(np.expand_dims(adv, axis=0), 'l2').astype(input_dtype) + query_count += 1 + ite_bool = self._model.is_adversarial(adv.reshape(input_shape), + label, + is_targeted=self._is_targeted) + if ite_bool: + find_adv = True + if best_eps > eps: + best_adv = adv + best_eps = eps + max_eps = eps + LOGGER.debug(TAG, 'Attack succeed, epsilon is {}'.format(eps)) + else: + min_eps = eps + if find_adv: + break + return find_adv, best_adv.reshape(input_shape), query_count diff --git a/mindarmour/attacks/carlini_wagner.py b/mindarmour/attacks/carlini_wagner.py new file mode 100644 index 0000000..0411647 --- /dev/null +++ b/mindarmour/attacks/carlini_wagner.py @@ -0,0 +1,419 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Carlini-wagner Attack. 
+""" +import numpy as np + +from mindspore import Tensor +from mindspore.nn import Cell + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_numpy_param, check_model, \ + check_pair_numpy_param, check_int_positive, check_param_type, \ + check_param_multi_types, check_value_positive, check_equal_shape +from mindarmour.utils.util import GradWrap +from mindarmour.utils.util import jacobian_matrix + +LOGGER = LogUtil.get_instance() +TAG = 'CW' + + +def _best_logits_of_other_class(logits, target_class, value=1): + """ + Choose the index of the largest logits exclude target class. + + Args: + logits (numpy.ndarray): Predict logits of samples. + target_class (numpy.ndarray): Target labels. + value (float): Maximum value of output logits. Default: 1. + + Returns: + numpy.ndarray, the index of the largest logits exclude the target + class. + + Examples: + >>> other_class = _best_logits_of_other_class([[0.2, 0.3, 0.5], + >>> [0.3, 0.4, 0.3]], [2, 1]) + """ + LOGGER.debug(TAG, "enter the func _best_logits_of_other_class.") + logits, target_class = check_pair_numpy_param('logits', logits, + 'target_class', target_class) + res = np.zeros_like(logits) + for i in range(logits.shape[0]): + res[i][target_class[i]] = value + return np.argmax(logits - res, axis=1) + + +class CarliniWagnerL2Attack(Attack): + """ + The Carlini & Wagner attack using L2 norm. + + References: `Nicholas Carlini, David Wagner: "Towards Evaluating + the Robustness of Neural Networks" `_ + + Args: + network (Cell): Target model. + num_classes (int): Number of labels of model output, which should be + greater than zero. + box_min (float): Lower bound of input of the target model. Default: 0. + box_max (float): Upper bound of input of the target model. Default: 1.0. + bin_search_steps (int): The number of steps for the binary search + used to find the optimal trade-off constant between distance + and confidence. Default: 5. + max_iterations (int): The maximum number of iterations, which should be + greater than zero. Default: 1000. + confidence (float): Confidence of the output of adversarial examples. + Default: 0. + learning_rate (float): The learning rate for the attack algorithm. + Default: 5e-3. + initial_const (float): The initial trade-off constant to use to balance + the relative importance of perturbation norm and confidence + difference. Default: 1e-2. + abort_early_check_ratio (float): Check loss progress every ratio of + all iteration. Default: 5e-2. + targeted (bool): If True, targeted attack. If False, untargeted attack. + Default: False. + fast (bool): If True, return the first found adversarial example. + If False, return the adversarial samples with smaller + perturbations. Default: True. + abort_early (bool): If True, Adam will be aborted if the loss hasn't + decreased for some time. If False, Adam will continue work until the + max iterations is arrived. Default: True. + sparse (bool): If True, input labels are sparse-coded. If False, + input labels are onehot-coded. Default: True. 
+ + Examples: + >>> attack = CarliniWagnerL2Attack(network) + """ + + def __init__(self, network, num_classes, box_min=0.0, box_max=1.0, + bin_search_steps=5, max_iterations=1000, confidence=0, + learning_rate=5e-3, initial_const=1e-2, + abort_early_check_ratio=5e-2, targeted=False, + fast=True, abort_early=True, sparse=True): + LOGGER.info(TAG, "init CW object.") + super(CarliniWagnerL2Attack, self).__init__() + self._network = check_model('network', network, Cell) + self._num_classes = check_int_positive('num_classes', num_classes) + self._min = check_param_type('box_min', box_min, float) + self._max = check_param_type('box_max', box_max, float) + self._bin_search_steps = check_int_positive('search_steps', + bin_search_steps) + self._max_iterations = check_int_positive('max_iterations', + max_iterations) + self._confidence = check_param_multi_types('confidence', confidence, + [int, float]) + self._learning_rate = check_value_positive('learning_rate', + learning_rate) + self._initial_const = check_value_positive('initial_const', + initial_const) + self._abort_early = check_param_type('abort_early', abort_early, bool) + self._fast = check_param_type('fast', fast, bool) + self._abort_early_check_ratio = check_value_positive('abort_early_check_ratio', + abort_early_check_ratio) + self._targeted = check_param_type('targeted', targeted, bool) + self._net_grad = GradWrap(self._network) + self._sparse = check_param_type('sparse', sparse, bool) + self._dtype = None + + def _loss_function(self, logits, new_x, org_x, org_or_target_class, + constant, confidence): + """ + Calculate the value of loss function and gradients of loss w.r.t inputs. + + Args: + logits (numpy.ndarray): The output of network before softmax. + new_x (numpy.ndarray): Adversarial examples. + org_x (numpy.ndarray): Original benign input samples. + org_or_target_class (numpy.ndarray): Original/target labels. + constant (float): A trade-off constant to use to balance loss + and perturbation norm. + confidence (float): Confidence level of the output of adversarial + examples. + + Returns: + numpy.ndarray, norm of perturbation, sum of the loss and the + norm, and gradients of the sum w.r.t inputs. + + Raises: + ValueError: If loss is less than 0. 
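A brief usage sketch for the constructor above; `net` stands for any trained MindSpore Cell returning logits (for example the LeNet5 defined in example/mnist_demo/lenet5_net.py of this patch), and all numbers are illustrative:

import numpy as np

# `net` is an assumed, already-trained mindspore.nn.Cell classifier.
attack = CarliniWagnerL2Attack(net, num_classes=10, bin_search_steps=5,
                               max_iterations=200, targeted=False)
inputs = np.random.rand(4, 1, 32, 32).astype(np.float32)
labels = np.array([0, 1, 2, 3])              # ground-truth labels (sparse=True)
adv = attack.generate(inputs, labels)        # numpy.ndarray of adversarial examples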
+ + Examples: + >>> L2_loss, total_loss, dldx = self._loss_function([0.2 , 0.3, + >>> 0.5], [0.1, 0.2, 0.2, 0.4], [0.12, 0.2, 0.25, 0.4], [1], 2, 0) + """ + LOGGER.debug(TAG, "enter the func _loss_function.") + + logits = check_numpy_param('logits', logits) + org_x = check_numpy_param('org_x', org_x) + new_x, org_or_target_class = check_pair_numpy_param('new_x', + new_x, + 'org_or_target_class', + org_or_target_class) + + new_x, org_x = check_equal_shape('new_x', new_x, 'org_x', org_x) + + other_class_index = _best_logits_of_other_class( + logits, org_or_target_class, value=np.inf) + loss1 = np.sum((new_x - org_x)**2, + axis=tuple(range(len(new_x.shape))[1:])) + loss2 = np.zeros_like(loss1, dtype=self._dtype) + loss2_grade = np.zeros_like(new_x, dtype=self._dtype) + jaco_grad = jacobian_matrix(self._net_grad, new_x, self._num_classes) + if self._targeted: + for i in range(org_or_target_class.shape[0]): + loss2[i] = max(0, logits[i][other_class_index[i]] + - logits[i][org_or_target_class[i]] + + confidence) + loss2_grade[i] = constant[i]*(jaco_grad[other_class_index[ + i]][i] - jaco_grad[org_or_target_class[i]][i]) + else: + for i in range(org_or_target_class.shape[0]): + loss2[i] = max(0, logits[i][org_or_target_class[i]] + - logits[i][other_class_index[i]] + confidence) + loss2_grade[i] = constant[i]*(jaco_grad[org_or_target_class[ + i]][i] - jaco_grad[other_class_index[i]][i]) + total_loss = loss1 + constant*loss2 + loss1_grade = 2*(new_x - org_x) + for i in range(org_or_target_class.shape[0]): + if loss2[i] < 0: + msg = 'loss value should greater than or equal to 0, ' \ + 'but got loss2 {}'.format(loss2[i]) + LOGGER.error(TAG, msg) + raise ValueError(msg) + if loss2[i] == 0: + loss2_grade[i, ...] = 0 + total_loss_grade = loss1_grade + loss2_grade + return loss1, total_loss, total_loss_grade + + def _to_attack_space(self, inputs): + """ + Transform input data into attack space. + + Args: + inputs (numpy.ndarray): Input data. + + Returns: + numpy.ndarray, transformed data which belongs to attack space. + + Examples: + >>> x_att = self._to_attack_space([0.2, 0.3, 0.3]) + """ + LOGGER.debug(TAG, "enter the func _to_attack_space.") + + inputs = check_numpy_param('inputs', inputs) + mean = (self._min + self._max) / 2 + diff = (self._max - self._min) / 2 + inputs = (inputs - mean) / diff + inputs = inputs*0.999999 + return np.arctanh(inputs) + + def _to_model_space(self, inputs): + """ + Transform input data into model space. + + Args: + inputs (numpy.ndarray): Input data. + + Returns: + numpy.ndarray, transformed data which belongs to model space + and the gradient of x_model w.r.t. x_att. + + Examples: + >>> x_att = self._to_model_space([10, 21, 9]) + """ + LOGGER.debug(TAG, "enter the func _to_model_space.") + + inputs = check_numpy_param('inputs', inputs) + inputs = np.tanh(inputs) + the_grad = 1 - np.square(inputs) + mean = (self._min + self._max) / 2 + diff = (self._max - self._min) / 2 + inputs = inputs*diff + mean + the_grad = the_grad*diff + return inputs, the_grad + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input data and targeted labels. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): The ground truth label of input samples + or target labels. + + Returns: + numpy.ndarray, generated adversarial examples. 
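The two helper methods above implement the usual tanh change of variables for box-constrained optimization. A quick NumPy round-trip with the default box (0, 1) shows the mapping and its inverse (values illustrative):

import numpy as np

box_min, box_max = 0.0, 1.0
mean, diff = (box_min + box_max) / 2, (box_max - box_min) / 2

x = np.array([0.05, 0.5, 0.95])
w = np.arctanh((x - mean) / diff * 0.999999)   # attack space: unbounded variable
x_back = np.tanh(w) * diff + mean              # model space: back inside the box
# x_back is numerically equal to x; the optimizer works on w, free of box constraints.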
+ + Examples: + >>> advs = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], [1, 2]] + """ + + LOGGER.debug(TAG, "enter the func generate.") + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + if not self._sparse: + labels = np.argmax(labels, axis=1) + self._dtype = inputs.dtype + att_original = self._to_attack_space(inputs) + reconstructed_original, _ = self._to_model_space(att_original) + + # find an adversarial sample + const = np.ones_like(labels, dtype=self._dtype)*self._initial_const + lower_bound = np.zeros_like(labels, dtype=self._dtype) + upper_bound = np.ones_like(labels, dtype=self._dtype)*np.inf + adversarial_res = inputs.copy() + adversarial_loss = np.ones_like(labels, dtype=self._dtype)*np.inf + samples_num = labels.shape[0] + adv_flag = np.zeros_like(labels) + for binary_search_step in range(self._bin_search_steps): + if (binary_search_step == self._bin_search_steps - 1) and \ + (self._bin_search_steps >= 10): + const = min(1e10, upper_bound) + LOGGER.debug(TAG, + 'starting optimization with const = %s', + str(const)) + + att_perturbation = np.zeros_like(att_original, dtype=self._dtype) + loss_at_previous_check = np.ones_like(labels, dtype=self._dtype)*np.inf + + # create a new optimizer to minimize the perturbation + optimizer = _AdamOptimizer(att_perturbation.shape) + + for iteration in range(self._max_iterations): + x_input, dxdp = self._to_model_space( + att_original + att_perturbation) + logits = self._network(Tensor(x_input)).asnumpy() + + current_l2_loss, current_loss, dldx = self._loss_function( + logits, x_input, reconstructed_original, + labels, const, self._confidence) + + # check if attack success (include all examples) + if self._targeted: + is_adv = (np.argmax(logits, axis=1) == labels) + else: + is_adv = (np.argmax(logits, axis=1) != labels) + + for i in range(samples_num): + if is_adv[i]: + adv_flag[i] = True + if current_l2_loss[i] < adversarial_loss[i]: + adversarial_res[i] = x_input[i] + adversarial_loss[i] = current_l2_loss[i] + + if np.all(adv_flag): + if self._fast: + LOGGER.debug(TAG, "succeed find adversarial examples.") + msg = 'iteration: {}, logits_att: {}, ' \ + 'loss: {}, l2_dist: {}' \ + .format(iteration, + np.argmax(logits, axis=1), + current_loss, current_l2_loss) + LOGGER.debug(TAG, msg) + return adversarial_res + + dldx, inputs = check_equal_shape('dldx', dldx, 'inputs', inputs) + + gradient = dldx*dxdp + att_perturbation += \ + optimizer(gradient, self._learning_rate) + + # check if should stop iteration early + flag = True + iter_check = iteration % (np.ceil( + self._max_iterations*self._abort_early_check_ratio)) + if self._abort_early and iter_check == 0: + # check progress + for i in range(inputs.shape[0]): + if current_loss[i] <= .9999*loss_at_previous_check[i]: + flag = False + # stop Adam if all samples has no progress + if flag: + LOGGER.debug(TAG, + 'step:%d, no progress yet, stop iteration', + binary_search_step) + break + loss_at_previous_check = current_loss + + for i in range(samples_num): + # update bound based on search result + if adv_flag[i]: + LOGGER.debug(TAG, + 'example %d, found adversarial with const=%f', + i, const[i]) + upper_bound[i] = const[i] + else: + LOGGER.debug(TAG, + 'example %d, failed to find adversarial' + ' with const=%f', + i, const[i]) + lower_bound[i] = const[i] + + if upper_bound[i] == np.inf: + const[i] *= 10 + else: + const[i] = (lower_bound[i] + upper_bound[i]) / 2 + + return adversarial_res + + +class _AdamOptimizer: + """ + AdamOptimizer is used to calculate the 
optimum attack step. + + Args: + shape (tuple): The shape of perturbations. + + Examples: + >>> optimizer = _AdamOptimizer(att_perturbation.shape) + """ + + def __init__(self, shape): + self._m = np.zeros(shape) + self._v = np.zeros(shape) + self._t = 0 + + def __call__(self, gradient, learning_rate=0.001, + beta1=0.9, beta2=0.999, epsilon=1e-8): + """ + Calculate the optimum perturbation for each iteration. + + Args: + gradient (numpy.ndarray): The gradient of the loss w.r.t. to the + variable. + learning_rate (float): The learning rate in the current iteration. + Default: 0.001. + beta1 (float): Decay rate for calculating the exponentially + decaying average of past gradients. Default: 0.9. + beta2 (float): Decay rate for calculating the exponentially + decaying average of past squared gradients. Default: 0.999. + epsilon (float): Small value to avoid division by zero. + Default: 1e-8. + + Returns: + numpy.ndarray, perturbations. + + Examples: + >>> perturbs = optimizer([0.2, 0.1, 0.15], 0.005) + """ + gradient = check_numpy_param('gradient', gradient) + self._t += 1 + self._m = beta1*self._m + (1 - beta1)*gradient + self._v = beta2*self._v + (1 - beta2)*gradient**2 + alpha = learning_rate*np.sqrt(1 - beta2**self._t) / (1 - beta1**self._t) + pertur = -alpha*self._m / (np.sqrt(self._v) + epsilon) + return pertur diff --git a/mindarmour/attacks/deep_fool.py b/mindarmour/attacks/deep_fool.py new file mode 100644 index 0000000..5b02719 --- /dev/null +++ b/mindarmour/attacks/deep_fool.py @@ -0,0 +1,154 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +DeepFool Attack. +""" +import numpy as np + +from mindspore import Tensor +from mindspore.nn import Cell + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.utils.util import GradWrap +from mindarmour.utils.util import jacobian_matrix +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + check_value_positive, check_int_positive, check_norm_level, \ + check_param_multi_types, check_param_type + +LOGGER = LogUtil.get_instance() +TAG = 'DeepFool' + + +class DeepFool(Attack): + """ + DeepFool is an untargeted & iterative attack achieved by moving the benign + sample to the nearest classification boundary and crossing the boundary. + + Reference: `DeepFool: a simple and accurate method to fool deep neural + networks `_ + + Args: + network (Cell): Target model. + num_classes (int): Number of labels of model output, which should be + greater than zero. + max_iters (int): Max iterations, which should be + greater than zero. Default: 50. + overshoot (float): Overshoot parameter. Default: 0.02. + norm_level (int): Order of the vector norm. Possible values: np.inf + or 2. Default: 2. + bounds (tuple): Upper and lower bounds of data range. In form of (clip_min, + clip_max). Default: None. + sparse (bool): If True, input labels are sparse-coded. If False, + input labels are onehot-coded. Default: True. 
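As a sanity check on the _AdamOptimizer update rule above, here is one step of the same computation in plain NumPy, with the default hyper-parameters and a toy gradient:

import numpy as np

m, v, t = np.zeros(3), np.zeros(3), 0
gradient = np.array([0.2, -0.1, 0.05])
learning_rate, beta1, beta2, epsilon = 0.001, 0.9, 0.999, 1e-8

t += 1
m = beta1 * m + (1 - beta1) * gradient
v = beta2 * v + (1 - beta2) * gradient ** 2
alpha = learning_rate * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
step = -alpha * m / (np.sqrt(v) + epsilon)   # the perturbation returned by __call__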
+ + Examples: + >>> attack = DeepFool(network) + """ + + def __init__(self, network, num_classes, max_iters=50, overshoot=0.02, + norm_level=2, bounds=None, sparse=True): + super(DeepFool, self).__init__() + self._network = check_model('network', network, Cell) + self._max_iters = check_int_positive('max_iters', max_iters) + self._overshoot = check_value_positive('overshoot', overshoot) + self._norm_level = check_norm_level(norm_level) + self._num_classes = check_int_positive('num_classes', num_classes) + self._net_grad = GradWrap(self._network) + self._bounds = check_param_multi_types('bounds', bounds, [list, tuple]) + self._sparse = check_param_type('sparse', sparse, bool) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input samples and original labels. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Original labels. + + Returns: + numpy.ndarray, adversarial examples. + + Raises: + NotImplementedError: If norm_level is not in [2, np.inf, '2', 'inf']. + + Examples: + >>> advs = generate([[0.2, 0.3, 0.4], [0.3, 0.4, 0.5]], [1, 2]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + if not self._sparse: + labels = np.argmax(labels, axis=1) + inputs_dtype = inputs.dtype + iteration = 0 + origin_labels = labels + cur_labels = origin_labels.copy() + weight = np.squeeze(np.zeros(inputs.shape[1:])) + r_tot = np.zeros(inputs.shape) + x_origin = inputs + while np.any(cur_labels == origin_labels) and iteration < self._max_iters: + preds = self._network(Tensor(inputs)).asnumpy() + grads = jacobian_matrix(self._net_grad, inputs, self._num_classes) + for idx in range(inputs.shape[0]): + diff_w = np.inf + label = origin_labels[idx] + if cur_labels[idx] != label: + continue + for k in range(self._num_classes): + if k == label: + continue + w_k = grads[k, idx, ...] - grads[label, idx, ...] + f_k = preds[idx, k] - preds[idx, label] + if self._norm_level == 2 or self._norm_level == '2': + diff_w_k = abs(f_k) / (np.linalg.norm(w_k) + 1e-8) + elif self._norm_level == np.inf \ + or self._norm_level == 'inf': + diff_w_k = abs(f_k) / (np.linalg.norm(w_k, ord=1) + 1e-8) + else: + msg = 'ord {} is not available.' \ + .format(str(self._norm_level)) + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + if diff_w_k < diff_w: + diff_w = diff_w_k + weight = w_k + + if self._norm_level == 2 or self._norm_level == '2': + r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8) + elif self._norm_level == np.inf or self._norm_level == 'inf': + r_i = diff_w*np.sign(weight) \ + / (np.linalg.norm(weight, ord=1) + 1e-8) + else: + msg = 'ord {} is not available in normalization.' \ + .format(str(self._norm_level)) + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + r_tot[idx, ...] = r_tot[idx, ...] 
+ r_i + + if self._bounds is not None: + clip_min, clip_max = self._bounds + inputs = x_origin + (1 + self._overshoot)*r_tot*(clip_max + - clip_min) + inputs = np.clip(inputs, clip_min, clip_max) + else: + inputs = x_origin + (1 + self._overshoot)*r_tot + cur_labels = np.argmax( + self._network(Tensor(inputs.astype(inputs_dtype))).asnumpy(), + axis=1) + iteration += 1 + inputs = inputs.astype(inputs_dtype) + del preds, grads + return inputs diff --git a/mindarmour/attacks/gradient_method.py b/mindarmour/attacks/gradient_method.py new file mode 100644 index 0000000..e70290e --- /dev/null +++ b/mindarmour/attacks/gradient_method.py @@ -0,0 +1,402 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Gradient-method Attack. +""" +from abc import abstractmethod + +import numpy as np + +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore.nn import SoftmaxCrossEntropyWithLogits + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.util import WithLossCell +from mindarmour.utils.util import GradWrapWithLoss +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + normalize_value, check_value_positive, check_param_multi_types, \ + check_norm_level, check_param_type + +LOGGER = LogUtil.get_instance() +TAG = 'SingleGrad' + + +class GradientMethod(Attack): + """ + Abstract base class for all single-step gradient-based attacks. + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: None. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: None. + loss_fn (Loss): Loss function for optimization. + """ + + def __init__(self, network, eps=0.07, alpha=None, bounds=None, + loss_fn=None): + super(GradientMethod, self).__init__() + self._network = check_model('network', network, Cell) + self._eps = check_value_positive('eps', eps) + self._dtype = None + if bounds is not None: + self._bounds = check_param_multi_types('bounds', bounds, + [list, tuple]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + else: + self._bounds = bounds + if alpha is not None: + self._alpha = check_value_positive('alpha', alpha) + else: + self._alpha = alpha + if loss_fn is None: + loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, + sparse=False) + with_loss_cell = WithLossCell(self._network, loss_fn) + self._grad_all = GradWrapWithLoss(with_loss_cell) + self._grad_all.set_train() + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input samples and original/target labels. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to create + adversarial examples. + labels (numpy.ndarray): Original/target labels. 
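Before moving on to the gradient-based attacks, a minimal usage sketch for the DeepFool class completed above; `net` is an assumed MindSpore Cell classifier with ten outputs and the inputs are placeholders:

import numpy as np

# `net` is an assumed, already-trained mindspore.nn.Cell returning 10 logits.
attack = DeepFool(net, num_classes=10, max_iters=50, norm_level=2,
                  bounds=(0.0, 1.0))
inputs = np.random.rand(2, 1, 32, 32).astype(np.float32)
true_labels = np.array([3, 8])               # sparse labels (sparse=True by default)
adv = attack.generate(inputs, true_labels)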
+ + Returns: + numpy.ndarray, generated adversarial examples. + + Examples: + >>> adv_x = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], + >>> [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],[0, , 0, 1, 0, 0, 0, 0, 0, 0, + >>> 0]]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + self._dtype = inputs.dtype + gradient = self._gradient(inputs, labels) + # use random method or not + if self._alpha is not None: + random_part = self._alpha*np.sign(np.random.normal( + size=inputs.shape)).astype(self._dtype) + perturbation = (self._eps - self._alpha)*gradient + random_part + else: + perturbation = self._eps*gradient + + if self._bounds is not None: + clip_min, clip_max = self._bounds + perturbation = perturbation*(clip_max - clip_min) + adv_x = inputs + perturbation + adv_x = np.clip(adv_x, clip_min, clip_max) + else: + adv_x = inputs + perturbation + return adv_x + + @abstractmethod + def _gradient(self, inputs, labels): + """ + Calculate gradients based on input samples and original/target labels. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to + create adversarial examples. + labels (numpy.ndarray): Original/target labels. + + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function _gradient() is an abstract method in class ' \ + '`GradientMethod`, and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + +class FastGradientMethod(GradientMethod): + """ + This attack is a one-step attack based on gradients calculation, and + the norm of perturbations includes L1, L2 and Linf. + + References: `I. J. Goodfellow, J. Shlens, and C. Szegedy, "Explaining + and harnessing adversarial examples," in ICLR, 2015. + `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: None. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + norm_level (Union[int, numpy.inf]): Order of the norm. + Possible values: np.inf, 1 or 2. Default: 2. + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + loss_fn (Loss): Loss function for optimization. + + Examples: + >>> attack = FastGradientMethod(network) + """ + + def __init__(self, network, eps=0.07, alpha=None, bounds=(0.0, 1.0), + norm_level=2, is_targeted=False, loss_fn=None): + + super(FastGradientMethod, self).__init__(network, + eps=eps, + alpha=alpha, + bounds=bounds, + loss_fn=loss_fn) + self._norm_level = check_norm_level(norm_level) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + + def _gradient(self, inputs, labels): + """ + Calculate gradients based on input samples and original/target labels. + + Args: + inputs (numpy.ndarray): Input sample. + labels (numpy.ndarray): Original/target label. + + Returns: + numpy.ndarray, gradient of inputs. 
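The branching in GradientMethod.generate above reduces to a few NumPy lines; this sketch mirrors the random-perturbation case (alpha set), with a made-up tensor standing in for the normalized loss gradient:

import numpy as np

eps, alpha = 0.07, 0.035
clip_min, clip_max = 0.0, 1.0
inputs = np.random.rand(2, 1, 28, 28).astype(np.float32)
gradient = np.random.randn(*inputs.shape).astype(np.float32)   # placeholder gradient

random_part = alpha * np.sign(np.random.normal(size=inputs.shape)).astype(np.float32)
perturbation = (eps - alpha) * gradient + random_part
perturbation *= clip_max - clip_min          # scale the step by the data range
adv_x = np.clip(inputs + perturbation, clip_min, clip_max)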
+ + Examples: + >>> grad = self._gradient([[0.2, 0.3, 0.4]], + >>> [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) + """ + sens = Tensor(np.array([1.0], self._dtype)) + out_grad = self._grad_all(Tensor(inputs), Tensor(labels), sens) + if isinstance(out_grad, tuple): + out_grad = out_grad[0] + gradient = out_grad.asnumpy() + + if self._is_targeted: + gradient = -gradient + return normalize_value(gradient, self._norm_level) + + +class RandomFastGradientMethod(FastGradientMethod): + """ + Fast Gradient Method use Random perturbation. + + References: `Florian Tramer, Alexey Kurakin, Nicolas Papernot, "Ensemble + adversarial training: Attacks and defenses" in ICLR, 2018 + `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: 0.035. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + norm_level (Union[int, numpy.inf]): Order of the norm. + Possible values: np.inf, 1 or 2. Default: 2. + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + loss_fn (Loss): Loss function for optimization. + + Raises: + ValueError: eps is smaller than alpha! + + Examples: + >>> attack = RandomFastGradientMethod(network) + """ + + def __init__(self, network, eps=0.07, alpha=0.035, bounds=(0.0, 1.0), + norm_level=2, is_targeted=False, loss_fn=None): + if eps < alpha: + raise ValueError('eps must be larger than alpha!') + super(RandomFastGradientMethod, self).__init__(network, + eps=eps, + alpha=alpha, + bounds=bounds, + norm_level=norm_level, + is_targeted=is_targeted, + loss_fn=loss_fn) + + +class FastGradientSignMethod(GradientMethod): + """ + Use the sign instead of the value of the gradient to the input. This attack is + often referred to as Fast Gradient Sign Method and was introduced previously. + + References: `Ian J. Goodfellow, J. Shlens, and C. Szegedy, "Explaining + and harnessing adversarial examples," in ICLR, 2015 + `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: None. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + loss_fn (Loss): Loss function for optimization. + + Examples: + >>> attack = FastGradientSignMethod(network) + """ + + def __init__(self, network, eps=0.07, alpha=None, bounds=(0.0, 1.0), + is_targeted=False, loss_fn=None): + super(FastGradientSignMethod, self).__init__(network, + eps=eps, + alpha=alpha, + bounds=bounds, + loss_fn=loss_fn) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + + def _gradient(self, inputs, labels): + """ + Calculate gradients based on input samples and original/target + labels. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Original/target labels. + + Returns: + numpy.ndarray, gradient of inputs. 
+ + Examples: + >>> grad = self._gradient([[0.2, 0.3, 0.4]], + >>> [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) + """ + sens = Tensor(np.array([1.0], self._dtype)) + out_grad = self._grad_all(Tensor(inputs), Tensor(labels), sens) + if isinstance(out_grad, tuple): + out_grad = out_grad[0] + gradient = out_grad.asnumpy() + if self._is_targeted: + gradient = -gradient + gradient = np.sign(gradient) + return gradient + + +class RandomFastGradientSignMethod(FastGradientSignMethod): + """ + Fast Gradient Sign Method using random perturbation. + + References: `F. Tramer, et al., "Ensemble adversarial training: Attacks + and defenses," in ICLR, 2018 `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: 0.035. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + is_targeted (bool): True: targeted attack. False: untargeted attack. + Default: False. + loss_fn (Loss): Loss function for optimization. + + Raises: + ValueError: eps is smaller than alpha! + + Examples: + >>> attack = RandomFastGradientSignMethod(network) + """ + + def __init__(self, network, eps=0.07, alpha=0.035, bounds=(0.0, 1.0), + is_targeted=False, loss_fn=None): + if eps < alpha: + raise ValueError('eps must be larger than alpha!') + super(RandomFastGradientSignMethod, self).__init__(network, + eps=eps, + alpha=alpha, + bounds=bounds, + is_targeted=is_targeted, + loss_fn=loss_fn) + + +class LeastLikelyClassMethod(FastGradientSignMethod): + """ + Least-Likely Class Method. + + References: `F. Tramer, et al., "Ensemble adversarial training: Attacks + and defenses," in ICLR, 2018 `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: None. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + loss_fn (Loss): Loss function for optimization. + + Examples: + >>> attack = LeastLikelyClassMethod(network) + """ + + def __init__(self, network, eps=0.07, alpha=None, bounds=(0.0, 1.0), + loss_fn=None): + super(LeastLikelyClassMethod, self).__init__(network, + eps=eps, + alpha=alpha, + bounds=bounds, + is_targeted=True, + loss_fn=loss_fn) + + +class RandomLeastLikelyClassMethod(FastGradientSignMethod): + """ + Least-Likely Class Method use Random perturbation. + + References: `F. Tramer, et al., "Ensemble adversarial training: Attacks + and defenses," in ICLR, 2018 `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of single-step adversarial perturbation generated + by the attack to data range. Default: 0.07. + alpha (float): Proportion of single-step random perturbation to data range. + Default: 0.035. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + loss_fn (Loss): Loss function for optimization. + + Raises: + ValueError: eps is smaller than alpha! 
+ + Examples: + >>> attack = RandomLeastLikelyClassMethod(network) + """ + + def __init__(self, network, eps=0.07, alpha=0.035, bounds=(0.0, 1.0), + loss_fn=None): + if eps < alpha: + raise ValueError('eps must be larger than alpha!') + super(RandomLeastLikelyClassMethod, self).__init__(network, + eps=eps, + alpha=alpha, + bounds=bounds, + is_targeted=True, + loss_fn=loss_fn) diff --git a/mindarmour/attacks/iterative_gradient_method.py b/mindarmour/attacks/iterative_gradient_method.py new file mode 100644 index 0000000..135ac6d --- /dev/null +++ b/mindarmour/attacks/iterative_gradient_method.py @@ -0,0 +1,432 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Iterative gradient method attack. """ +from abc import abstractmethod + +import numpy as np + +from mindspore.nn import SoftmaxCrossEntropyWithLogits +from mindspore import Tensor +from mindspore.nn import Cell + +from mindarmour.attacks.attack import Attack +from mindarmour.attacks.gradient_method import FastGradientSignMethod +from mindarmour.utils.logger import LogUtil +from mindarmour.utils.util import WithLossCell +from mindarmour.utils.util import GradWrapWithLoss +from mindarmour.utils._check_param import check_pair_numpy_param, \ + normalize_value, check_model, check_value_positive, check_int_positive, \ + check_param_type, check_norm_level, check_param_multi_types + +LOGGER = LogUtil.get_instance() +TAG = 'IterGrad' + + +def _reshape_l1_projection(values, eps=3): + """ + `Implementation of L1 ball projection from:`_. + + .. _`Implementation of L1 ball projection from:`: + https://stanford.edu/~jduchi/projects/DuchiShSiCh08.pdf + + Args: + values (numpy.ndarray): Input data reshape into 2-dims. + eps (float): L1 radius. Default: 3. + + Returns: + numpy.ndarray, containing the projection. + """ + abs_x = np.abs(values) + abs_x = np.sum(abs_x, axis=1) + indexes_b = (abs_x > eps) + x_b = values[indexes_b] + batch_size_b = x_b.shape[0] + if batch_size_b == 0: + return values + + # make the projection on l1 ball for elements outside the ball + b_mu = -np.sort(-np.abs(x_b), axis=1) + b_vv = np.arange(x_b.shape[1]).astype(np.float) + b_st = (np.cumsum(b_mu, axis=1)-eps)/(b_vv+1) + selected = (b_mu - b_st) > 0 + rho = np.sum((np.cumsum((1-selected), axis=1) == 0), axis=1)-1 + theta = np.take_along_axis(b_st, np.expand_dims(rho, axis=1), axis=1) + proj_x_b = np.maximum(0, np.abs(x_b)-theta)*np.sign(x_b) + + # gather all the projected batch + proj_x = np.copy(values) + proj_x[indexes_b] = proj_x_b + return proj_x + + +def _projection(values, eps, norm_level): + """ + Implementation of values normalization within eps. + + Args: + values (numpy.ndarray): Input data. + eps (float): Project radius. + norm_level (Union[int, char, numpy.inf]): Order of the norm. Possible + values: np.inf, 1 or 2. + + Returns: + numpy.ndarray, normalized values. + + Raises: + NotImplementedError: If the norm_level is not in [1, 2, np.inf, '1', + '2', 'inf']. 
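A quick way to convince oneself that the L1 projection above behaves as intended is to check the norms after projection. The import below assumes the package layout added by this patch; _reshape_l1_projection is module-private, so this is for experimentation only:

import numpy as np

from mindarmour.attacks.iterative_gradient_method import _reshape_l1_projection

values = np.random.randn(4, 10)              # four flattened perturbations
proj = _reshape_l1_projection(values, eps=3)
# Rows already inside the L1 ball are untouched; the others are projected onto it.
print(np.abs(proj).sum(axis=1) <= 3 + 1e-6)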
+ """ + if norm_level in (1, '1'): + sample_batch = values.shape[0] + x_flat = values.reshape(sample_batch, -1) + proj_flat = _reshape_l1_projection(x_flat, eps) + return proj_flat.reshape(values.shape) + if norm_level in (2, '2'): + return eps*normalize_value(values, norm_level) + if norm_level in (np.inf, 'inf'): + return eps*np.sign(values) + msg = 'Values of `norm_level` different from 1, 2 and `np.inf` are ' \ + 'currently not supported.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + +class IterativeGradientMethod(Attack): + """ + Abstract base class for all iterative gradient based attacks. + + Args: + network (Cell): Target model. + eps (float): Proportion of adversarial perturbation generated by the + attack to data range. Default: 0.3. + eps_iter (float): Proportion of single-step adversarial perturbation + generated by the attack to data range. Default: 0.1. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + nb_iter (int): Number of iteration. Default: 5. + loss_fn (Loss): Loss function for optimization. + """ + def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0), nb_iter=5, + loss_fn=None): + super(IterativeGradientMethod, self).__init__() + self._network = check_model('network', network, Cell) + self._eps = check_value_positive('eps', eps) + self._eps_iter = check_value_positive('eps_iter', eps_iter) + self._nb_iter = check_int_positive('nb_iter', nb_iter) + self._bounds = check_param_multi_types('bounds', bounds, [list, tuple]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + if loss_fn is None: + loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False) + self._loss_grad = GradWrapWithLoss(WithLossCell(self._network, loss_fn)) + self._loss_grad.set_train() + + @abstractmethod + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input samples and original/target labels. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to create + adversarial examples. + labels (numpy.ndarray): Original/target labels. + + Raises: + NotImplementedError: This function is not available in + IterativeGradientMethod. + + Examples: + >>> adv_x = attack.generate([[0.1, 0.9, 0.6], + >>> [0.3, 0, 0.3]], + >>> [[0, , 1, 0, 0, 0, 0, 0, 0, 0], + >>> [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]) + """ + msg = 'The function generate() is an abstract method in class ' \ + '`IterativeGradientMethod`, and should be implemented ' \ + 'in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + +class BasicIterativeMethod(IterativeGradientMethod): + """ + The Basic Iterative Method attack, an iterative FGSM method to generate + adversarial examples. + + References: `A. Kurakin, I. Goodfellow, and S. Bengio, "Adversarial examples + in the physical world," in ICLR, 2017 `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of adversarial perturbation generated by the + attack to data range. Default: 0.3. + eps_iter (float): Proportion of single-step adversarial perturbation + generated by the attack to data range. Default: 0.1. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + nb_iter (int): Number of iteration. Default: 5. + loss_fn (Loss): Loss function for optimization. 
+ attack (class): The single step gradient method of each iteration. In + this class, FGSM is used. + + Examples: + >>> attack = BasicIterativeMethod(network) + """ + + def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0), + is_targeted=False, nb_iter=5, loss_fn=None): + super(BasicIterativeMethod, self).__init__(network, + eps=eps, + eps_iter=eps_iter, + bounds=bounds, + nb_iter=nb_iter, + loss_fn=loss_fn) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + self._attack = FastGradientSignMethod(self._network, + eps=self._eps_iter, + bounds=self._bounds, + is_targeted=self._is_targeted, + loss_fn=loss_fn) + + def generate(self, inputs, labels): + + """ + Simple iterative FGSM method to generate adversarial examples. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to + create adversarial examples. + labels (numpy.ndarray): Original/target labels. + + Returns: + numpy.ndarray, generated adversarial examples. + + Examples: + >>> adv_x = attack.generate([[0.3, 0.2, 0.6], + >>> [0.3, 0.2, 0.4]], + >>> [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + >>> [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + arr_x = inputs + if self._bounds is not None: + clip_min, clip_max = self._bounds + clip_diff = clip_max - clip_min + for _ in range(self._nb_iter): + adv_x = self._attack.generate(inputs, labels) + perturs = np.clip(adv_x - arr_x, (0 - self._eps)*clip_diff, + self._eps*clip_diff) + adv_x = arr_x + perturs + inputs = adv_x + else: + for _ in range(self._nb_iter): + adv_x = self._attack.generate(inputs, labels) + adv_x = np.clip(adv_x, arr_x - self._eps, arr_x + self._eps) + inputs = adv_x + return adv_x + + +class MomentumIterativeMethod(IterativeGradientMethod): + """ + The Momentum Iterative Method attack. + + References: `Y. Dong, et al., "Boosting adversarial attacks with + momentum," arXiv:1710.06081, 2017 `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of adversarial perturbation generated by the + attack to data range. Default: 0.3. + eps_iter (float): Proportion of single-step adversarial perturbation + generated by the attack to data range. Default: 0.1. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + nb_iter (int): Number of iteration. Default: 5. + decay_factor (float): Decay factor in iterations. Default: 1.0. + norm_level (Union[int, numpy.inf]): Order of the norm. Possible values: + np.inf, 1 or 2. Default: 'inf'. + loss_fn (Loss): Loss function for optimization. + """ + + def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0), + is_targeted=False, nb_iter=5, decay_factor=1.0, + norm_level='inf', loss_fn=None): + super(MomentumIterativeMethod, self).__init__(network, + eps=eps, + eps_iter=eps_iter, + bounds=bounds, + nb_iter=nb_iter, + loss_fn=loss_fn) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + self._decay_factor = check_value_positive('decay_factor', decay_factor) + self._norm_level = check_norm_level(norm_level) + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input data and origin/target labels. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to + create adversarial examples. + labels (numpy.ndarray): Original/target labels. 
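The key difference between the single-step attacks and the loop in BasicIterativeMethod.generate above is that the accumulated perturbation is clipped back into the eps-ball after every step; schematically, with a random stand-in for the FGSM step:

import numpy as np

eps, eps_iter, nb_iter = 0.3, 0.1, 5
clip_min, clip_max = 0.0, 1.0
clip_diff = clip_max - clip_min

x = np.random.rand(1, 1, 28, 28).astype(np.float32)
cur = x.copy()
for _ in range(nb_iter):
    step = eps_iter * np.sign(np.random.randn(*x.shape)).astype(np.float32)
    perturs = np.clip((cur + step) - x, -eps * clip_diff, eps * clip_diff)
    cur = x + perturs                        # stays within eps of the original x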
+ + Returns: + numpy.ndarray, generated adversarial examples. + + Examples: + >>> adv_x = attack.generate([[0.5, 0.2, 0.6], + >>> [0.3, 0, 0.2]], + >>> [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + >>> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + arr_x = inputs + momentum = 0 + if self._bounds is not None: + clip_min, clip_max = self._bounds + clip_diff = clip_max - clip_min + for _ in range(self._nb_iter): + gradient = self._gradient(inputs, labels) + momentum = self._decay_factor*momentum + gradient + adv_x = inputs + self._eps_iter*np.sign(momentum) + perturs = np.clip(adv_x - arr_x, (0 - self._eps)*clip_diff, + self._eps*clip_diff) + adv_x = arr_x + perturs + adv_x = np.clip(adv_x, clip_min, clip_max) + inputs = adv_x + else: + for _ in range(self._nb_iter): + gradient = self._gradient(inputs, labels) + momentum = self._decay_factor*momentum + gradient + adv_x = inputs + self._eps_iter*np.sign(momentum) + adv_x = np.clip(adv_x, arr_x - self._eps, arr_x + self._eps) + inputs = adv_x + + return adv_x + + def _gradient(self, inputs, labels): + """ + Calculate the gradient of input samples. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Original/target labels. + + Returns: + numpy.ndarray, gradient of labels w.r.t inputs. + + Examples: + >>> grad = self._gradient([[0.5, 0.3, 0.4]], + >>> [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]) + """ + sens = Tensor(np.array([1.0], inputs.dtype)) + # get grad of loss over x + out_grad = self._loss_grad(Tensor(inputs), Tensor(labels), sens) + if isinstance(out_grad, tuple): + out_grad = out_grad[0] + gradient = out_grad.asnumpy() + + if self._is_targeted: + gradient = -gradient + return normalize_value(gradient, self._norm_level) + + +class ProjectedGradientDescent(BasicIterativeMethod): + """ + The Projected Gradient Descent attack is a variant of the Basic Iterative + Method in which, after each iteration, the perturbation is projected on an + lp-ball of specified radius (in addition to clipping the values of the + adversarial sample so that it lies in the permitted data range). This is + the attack proposed by Madry et al. for adversarial training. + + References: `A. Madry, et al., "Towards deep learning models resistant to + adversarial attacks," in ICLR, 2018 `_ + + Args: + network (Cell): Target model. + eps (float): Proportion of adversarial perturbation generated by the + attack to data range. Default: 0.3. + eps_iter (float): Proportion of single-step adversarial perturbation + generated by the attack to data range. Default: 0.1. + bounds (tuple): Upper and lower bounds of data, indicating the data range. + In form of (clip_min, clip_max). Default: (0.0, 1.0). + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: False. + nb_iter (int): Number of iteration. Default: 5. + norm_level (Union[int, numpy.inf]): Order of the norm. Possible values: + np.inf, 1 or 2. Default: 'inf'. + loss_fn (Loss): Loss function for optimization. + """ + + def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0), + is_targeted=False, nb_iter=5, norm_level='inf', loss_fn=None): + super(ProjectedGradientDescent, self).__init__(network, + eps=eps, + eps_iter=eps_iter, + bounds=bounds, + is_targeted=is_targeted, + nb_iter=nb_iter, + loss_fn=loss_fn) + self._norm_level = check_norm_level(norm_level) + + def generate(self, inputs, labels): + """ + Iteratively generate adversarial examples based on BIM method. 
The + perturbation is normalized by projected method with parameter norm_level . + + Args: + inputs (numpy.ndarray): Benign input samples used as references to + create adversarial examples. + labels (numpy.ndarray): Original/target labels. + + Returns: + numpy.ndarray, generated adversarial examples. + + Examples: + >>> adv_x = attack.generate([[0.6, 0.2, 0.6], + >>> [0.3, 0.3, 0.4]], + >>> [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + >>> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + arr_x = inputs + if self._bounds is not None: + clip_min, clip_max = self._bounds + clip_diff = clip_max - clip_min + for _ in range(self._nb_iter): + adv_x = self._attack.generate(inputs, labels) + perturs = _projection(adv_x - arr_x, + self._eps, + norm_level=self._norm_level) + perturs = np.clip(perturs, (0 - self._eps)*clip_diff, + self._eps*clip_diff) + adv_x = arr_x + perturs + inputs = adv_x + else: + for _ in range(self._nb_iter): + adv_x = self._attack.generate(inputs, labels) + perturs = _projection(adv_x - arr_x, + self._eps, + norm_level=self._norm_level) + adv_x = arr_x + perturs + adv_x = np.clip(adv_x, arr_x - self._eps, arr_x + self._eps) + inputs = adv_x + return adv_x diff --git a/mindarmour/attacks/jsma.py b/mindarmour/attacks/jsma.py new file mode 100644 index 0000000..4e9db36 --- /dev/null +++ b/mindarmour/attacks/jsma.py @@ -0,0 +1,196 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +JSMA-Attack. +""" +import numpy as np + +from mindspore import Tensor +from mindspore.nn import Cell + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.util import GradWrap +from mindarmour.utils.util import jacobian_matrix +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + check_param_type, check_int_positive, check_value_positive, \ + check_value_non_negative + + +LOGGER = LogUtil.get_instance() +TAG = 'JSMA' + + +class JSMAAttack(Attack): + """ + JSMA is an targeted & iterative attack based on saliency map of + input features. + + Reference: `The limitations of deep learning in adversarial settings + `_ + + Args: + network (Cell): Target model. + num_classes (int): Number of labels of model output, which should be + greater than zero. + box_min (float): Lower bound of input of the target model. Default: 0. + box_max (float): Upper bound of input of the target model. Default: 1.0. + theta (float): Change ratio of one pixel (relative to + input data range). Default: 1.0. + max_iteration (int): Maximum round of iteration. Default: 100. + max_count (int): Maximum times to change each pixel. Default: 3. + increase (bool): If True, increase perturbation. If False, decrease + perturbation. Default: True. + sparse (bool): If True, input labels are sparse-coded. If False, + input labels are onehot-coded. Default: True. 
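+
+    Note:
+        Each of the two selected pixels is changed by theta*(box_max - box_min)
+        per iteration (see `_generate_one`). With the defaults theta=1.0 and
+        box bounds (0.0, 1.0) that is a change of 1.0 in a single step, i.e.
+        straight to the box boundary; smaller theta values give finer,
+        multi-step perturbations.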
+
+    Examples:
+        >>> attack = JSMAAttack(network)
+    """
+
+    def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
+                 theta=1.0, max_iteration=1000, max_count=3, increase=True,
+                 sparse=True):
+        super(JSMAAttack, self).__init__()
+        LOGGER.debug(TAG, "init jsma class.")
+        self._network = check_model('network', network, Cell)
+        self._min = check_value_non_negative('box_min', box_min)
+        self._max = check_value_non_negative('box_max', box_max)
+        self._num_classes = check_int_positive('num_classes', num_classes)
+        self._theta = check_value_positive('theta', theta)
+        self._max_iter = check_int_positive('max_iteration', max_iteration)
+        self._max_count = check_int_positive('max_count', max_count)
+        self._increase = check_param_type('increase', increase, bool)
+        self._net_grad = GradWrap(self._network)
+        self._bit_map = None
+        self._sparse = check_param_type('sparse', sparse, bool)
+
+    def _saliency_map(self, data, bit_map, target):
+        """
+        Compute the saliency map of all pixels.
+
+        Args:
+            data (numpy.ndarray): Input sample.
+            bit_map (numpy.ndarray): Bit map to control modify frequency of
+                each pixel.
+            target (int): Target class.
+
+        Returns:
+            tuple, indices of selected pixel to modify.
+
+        Examples:
+            >>> p1_ind, p2_ind = self._saliency_map([0.2, 0.3, 0.5],
+            >>>                                     [1, 0, 1], 1)
+        """
+        jaco_grad = jacobian_matrix(self._net_grad, data, self._num_classes)
+        jaco_grad = jaco_grad.reshape(self._num_classes, -1)
+        alpha = jaco_grad[target]*bit_map
+        alpha_trans = np.reshape(alpha, (alpha.shape[0], 1))
+        alpha_two_dim = alpha + alpha_trans
+        # pixel influence on other classes except target class
+        other_grads = [jaco_grad[class_ind] for class_ind in range(
+            self._num_classes)]
+        beta = np.sum(other_grads, axis=0)*bit_map - alpha
+        beta_trans = np.reshape(beta, (beta.shape[0], 1))
+        beta_two_dim = beta + beta_trans
+
+        if self._increase:
+            alpha_two_dim = (alpha_two_dim > 0)*alpha_two_dim
+            beta_two_dim = (beta_two_dim < 0)*beta_two_dim
+        else:
+            alpha_two_dim = (alpha_two_dim < 0)*alpha_two_dim
+            beta_two_dim = (beta_two_dim > 0)*beta_two_dim
+
+        sal_map = (-1*alpha_two_dim*beta_two_dim)
+        two_dim_index = np.argmax(sal_map)
+        p1_ind = two_dim_index % len(data.flatten())
+        p2_ind = two_dim_index // len(data.flatten())
+        return p1_ind, p2_ind
+
+    def _generate_one(self, data, target):
+        """
+        Generate one adversarial example.
+
+        Args:
+            data (numpy.ndarray): Input sample (only one).
+            target (int): Target label.
+
+        Returns:
+            numpy.ndarray, adversarial example or zeros (if failed).
+ + Examples: + >>> adv = self._generate_one([0.2, 0.3 ,0.4], 1) + """ + ori_shape = data.shape + temp = data.flatten() + bit_map = np.ones_like(temp) + fake_res = np.zeros_like(data) + counter = np.zeros_like(temp) + perturbed = np.copy(temp) + for _ in range(self._max_iter): + pre_logits = self._network(Tensor(np.expand_dims( + perturbed.reshape(ori_shape), axis=0))) + per_pred = np.argmax(pre_logits.asnumpy()) + if per_pred == target: + LOGGER.debug(TAG, 'find one adversarial sample successfully.') + return perturbed.reshape(ori_shape) + if np.all(bit_map == 0): + LOGGER.debug(TAG, 'fail to find adversarial sample') + return perturbed.reshape(ori_shape) + p1_ind, p2_ind = self._saliency_map(perturbed.reshape( + ori_shape)[np.newaxis, :], bit_map, target) + if self._increase: + perturbed[p1_ind] += self._theta*(self._max - self._min) + perturbed[p2_ind] += self._theta*(self._max - self._min) + else: + perturbed[p1_ind] -= self._theta*(self._max - self._min) + perturbed[p2_ind] -= self._theta*(self._max - self._min) + counter[p1_ind] += 1 + counter[p2_ind] += 1 + if (perturbed[p1_ind] >= self._max) or ( + perturbed[p1_ind] <= self._min) \ + or (counter[p1_ind] > self._max_count): + bit_map[p1_ind] = 0 + if (perturbed[p2_ind] >= self._max) or ( + perturbed[p2_ind] <= self._min) \ + or (counter[p2_ind] > self._max_count): + bit_map[p2_ind] = 0 + perturbed = np.clip(perturbed, self._min, self._max) + LOGGER.debug(TAG, 'fail to find adversarial sample.') + return fake_res + + def generate(self, inputs, labels): + """ + Generate adversarial examples in batch. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Target labels. + + Returns: + numpy.ndarray, adversarial samples. + + Examples: + >>> advs = generate([[0.2, 0.3, 0.4], [0.3, 0.4, 0.5]], [1, 2]) + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + if not self._sparse: + labels = np.argmax(labels, axis=1) + LOGGER.debug(TAG, 'start to generate adversarial samples.') + res = [] + for i in range(inputs.shape[0]): + res.append(self._generate_one(inputs[i], labels[i])) + LOGGER.debug(TAG, 'finished.') + return np.asarray(res) diff --git a/mindarmour/attacks/lbfgs.py b/mindarmour/attacks/lbfgs.py new file mode 100644 index 0000000..42ded16 --- /dev/null +++ b/mindarmour/attacks/lbfgs.py @@ -0,0 +1,224 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +LBFGS-Attack. 
+""" +import numpy as np +import scipy.optimize as so + +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore.nn import SoftmaxCrossEntropyWithLogits + +from mindarmour.attacks.attack import Attack +from mindarmour.utils.logger import LogUtil +from mindarmour.utils.util import WithLossCell +from mindarmour.utils.util import GradWrapWithLoss +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + check_int_positive, check_value_positive, check_param_type, \ + check_param_multi_types + +LOGGER = LogUtil.get_instance() +TAG = 'LBFGS' + + +class LBFGS(Attack): + """ + Uses L-BFGS-B to minimize the distance between the input and the adversarial example. + + References: `Pedro Tabacof, Eduardo Valle. "Exploring the Space of + Adversarial Images" `_ + + Args: + network (Cell): The network of attacked model. + eps (float): Attack step size. Default: 1e-5. + bounds (tuple): Upper and lower bounds of data. Default: (0.0, 1.0) + is_targeted (bool): If True, targeted attack. If False, untargeted + attack. Default: True. + nb_iter (int): Number of iteration of lbfgs-optimizer, which should be + greater than zero. Default: 150. + search_iters (int): Number of changes in step size, which should be + greater than zero. Default: 30. + loss_fn (Functions): Loss function of substitute model. Default: None. + sparse (bool): If True, input labels are sparse-coded. If False, + input labels are onehot-coded. Default: False. + + Examples: + >>> attack = LBFGS(network) + """ + def __init__(self, network, eps=1e-5, bounds=(0.0, 1.0), is_targeted=True, + nb_iter=150, search_iters=30, loss_fn=None, sparse=False): + super(LBFGS, self).__init__() + self._network = check_model('network', network, Cell) + self._eps = check_value_positive('eps', eps) + self._is_targeted = check_param_type('is_targeted', is_targeted, bool) + self._nb_iter = check_int_positive('nb_iter', nb_iter) + self._search_iters = check_int_positive('search_iters', search_iters) + if loss_fn is None: + loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False) + with_loss_cell = WithLossCell(self._network, loss_fn) + self._grad_all = GradWrapWithLoss(with_loss_cell) + self._dtype = None + self._bounds = check_param_multi_types('bounds', bounds, [list, tuple]) + self._sparse = check_param_type('sparse', sparse, bool) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + box_max, box_min = bounds + if box_max < box_min: + self._box_min = box_max + self._box_max = box_min + else: + self._box_min = box_min + self._box_max = box_max + + def generate(self, inputs, labels): + """ + Generate adversarial examples based on input data and target labels. + + Args: + inputs (numpy.ndarray): Benign input samples used as references to create + adversarial examples. + labels (numpy.ndarray): Original/target labels. + + Returns: + numpy.ndarray, generated adversarial examples. 
+ + Examples: + >>> adv = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], [2, 2]) + """ + LOGGER.debug(TAG, 'start to generate adv image.') + arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels) + self._dtype = arr_x.dtype + adv_list = list() + for original_x, label_y in zip(arr_x, arr_y): + adv_list.append(self._optimize( + original_x, label_y, epsilon=self._eps)) + return np.array(adv_list) + + def _forward_one(self, cur_input): + """Forward one sample in model.""" + cur_input = np.expand_dims(cur_input, axis=0) + out_logits = self._network(Tensor(cur_input)).asnumpy() + return out_logits + + def _gradient(self, cur_input, labels, shape): + """ Return model gradient to minimize loss in l-bfgs-b.""" + label_dtype = labels.dtype + sens = Tensor(np.array([1], self._dtype)) + labels = np.expand_dims(labels, axis=0).astype(label_dtype) + # input shape should like original shape + reshape_input = np.expand_dims(cur_input.reshape(shape), + axis=0) + out_grad = self._grad_all(Tensor(reshape_input), Tensor(labels), sens) + if isinstance(out_grad, tuple): + out_grad = out_grad[0] + return out_grad.asnumpy() + + def _loss(self, cur_input, start_input, cur_eps, shape, labels): + """ + The l-bfgs-b loss used is Mean Square Error distance from original + input plus crossentropy loss. + """ + cur_input = cur_input.astype(self._dtype) + mse_distance = np.mean(np.square(start_input - cur_input)) / \ + ((self._box_max - self._box_min)**2) + logits = self._forward_one(cur_input.reshape(shape)).flatten() + logits = logits - np.max(logits) + if self._sparse: + target_class = labels + else: + target_class = np.argmax(labels) + if self._is_targeted: + crossentropy = np.log(np.sum(np.exp(logits))) - logits[target_class] + gradient = self._gradient(cur_input, labels, shape).flatten() + else: + crossentropy = logits[target_class] - np.log(np.sum(np.exp(logits))) + gradient = -self._gradient(cur_input, labels, shape).flatten() + + return (mse_distance + cur_eps*crossentropy).astype(self._dtype), \ + gradient.astype(np.float64) + + def _lbfgsb(self, start_input, cur_eps, shape, labels, bounds): + """ + A wrapper. + Method reference to `scipy.optimize.fmin_l_bfgs_b`_ + + .. _`scipy.optimize.fmin_l_bfgs_b`: https://docs.scipy.org/doc/scipy/ + reference/generated/scipy.optimize.fmin_l_bfgs_b.html + """ + approx_grad_eps = (self._box_max - self._box_min) / 100 + max_matrix_variable = 15 + cur_input, _, detail_info = so.fmin_l_bfgs_b( + self._loss, + start_input, + args=(start_input, cur_eps, shape, labels), + approx_grad=False, + bounds=bounds, + m=max_matrix_variable, + maxiter=self._nb_iter, + epsilon=approx_grad_eps) + + LOGGER.debug(TAG, str(detail_info)) + # LBFGS-B does not always exactly respect the boundaries + if np.amax(cur_input) > self._box_max or np.amin( + cur_input) < self._box_min: # pragma: no coverage + LOGGER.debug(TAG, + 'Input out of bounds (min, max = %s, %s).' 
+                         ' Performing manual clip.',
+                         np.amin(cur_input),
+                         np.amax(cur_input))
+            cur_input = np.clip(cur_input, self._box_min, self._box_max)
+        cur_input = cur_input.astype(self._dtype)
+        cur_input = cur_input.reshape(shape)
+        adv_prediction = self._forward_one(cur_input)
+
+        LOGGER.debug(TAG, 'input one sample label is: {}'.format(labels))
+        if not self._sparse:
+            labels = np.argmax(labels)
+        if self._is_targeted:
+            return cur_input, np.argmax(adv_prediction) == labels
+        return cur_input, np.argmax(adv_prediction) != labels
+
+    def _optimize(self, start_input, labels, epsilon):
+        """
+        Given the loss function and gradient, use the L-BFGS-B algorithm to update
+        the input sample. Epsilon is doubled until an adversarial example is found.
+
+        Args:
+            start_input (numpy.ndarray): Benign input samples used as references
+                to create adversarial examples.
+            labels (numpy.ndarray): Target labels.
+            epsilon (float): Initial attack step size, doubled at each search
+                iteration until an adversarial example is found.
+        """
+        # store the shape for later and operate on the flattened input
+        ori_shape = start_input.shape
+        start_input = start_input.flatten().astype(self._dtype)
+        bounds = [self._bounds]*len(start_input)
+
+        # finding initial cur_eps
+        iter_c = epsilon
+        for _ in range(self._search_iters):
+            iter_c = 2*iter_c
+            generate_x, is_adversarial = self._lbfgsb(start_input,
+                                                      iter_c,
+                                                      ori_shape,
+                                                      labels,
+                                                      bounds)
+            LOGGER.debug(TAG, 'Tested iter_c = %f', iter_c)
+            if is_adversarial:
+                LOGGER.debug(TAG, 'found adversarial example successfully.')
+                return generate_x
+        LOGGER.debug(TAG, 'failed to find adversarial example.')
+        return generate_x
diff --git a/mindarmour/defenses/__init__.py b/mindarmour/defenses/__init__.py
new file mode 100644
index 0000000..b9d59c8
--- /dev/null
+++ b/mindarmour/defenses/__init__.py
@@ -0,0 +1,15 @@
+"""
+This module includes classical defense algorithms for defending against
+adversarial examples and enhancing model security and trustworthiness.
+"""
+from .adversarial_defense import AdversarialDefense
+from .adversarial_defense import AdversarialDefenseWithAttacks
+from .adversarial_defense import EnsembleAdversarialDefense
+from .natural_adversarial_defense import NaturalAdversarialDefense
+from .projected_adversarial_defense import ProjectedAdversarialDefense
+
+__all__ = ['AdversarialDefense',
+           'AdversarialDefenseWithAttacks',
+           'NaturalAdversarialDefense',
+           'ProjectedAdversarialDefense',
+           'EnsembleAdversarialDefense']
diff --git a/mindarmour/defenses/adversarial_defense.py b/mindarmour/defenses/adversarial_defense.py
new file mode 100644
index 0000000..e4a7ba5
--- /dev/null
+++ b/mindarmour/defenses/adversarial_defense.py
@@ -0,0 +1,169 @@
+# Copyright 2019 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Adversarial Defense.
+""" +import numpy as np + +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore.nn.optim.momentum import Momentum +from mindspore.nn import SoftmaxCrossEntropyWithLogits +from mindspore.nn import WithLossCell, TrainOneStepCell + +from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \ + check_param_in_range, check_param_type, check_param_multi_types +from mindarmour.defenses.defense import Defense + + +class AdversarialDefense(Defense): + """ + Adversarial training using given adversarial examples. + + Args: + network (Cell): A MindSpore network to be defensed. + loss_fn (Functions): Loss function. Default: None. + optimizer (Cell): Optimizer used to train the network. Default: None. + + Examples: + >>> class Net(Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self._reshape = P.Reshape() + >>> self._full_con_1 = Dense(28*28, 120) + >>> self._full_con_2 = Dense(120, 84) + >>> self._full_con_3 = Dense(84, 10) + >>> self._relu = ReLU() + >>> + >>> def construct(self, x): + >>> out = self._reshape(x, (-1, 28*28)) + >>> out = self._full_con_1(out) + >>> out = self.relu(out) + >>> out = self._full_con_2(out) + >>> out = self.relu(out) + >>> out = self._full_con_3(out) + >>> return out + >>> + >>> net = Net() + >>> lr = 0.0001 + >>> momentum = 0.9 + >>> loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) + >>> optimizer = Momentum(net.trainable_params(), lr, momentum) + >>> adv_defense = AdversarialDefense(net, loss_fn, optimizer) + >>> inputs = np.random.rand(32, 1, 28, 28).astype(np.float32) + >>> labels = np.random.randint(0, 10).astype(np.int32) + >>> adv_defense.defense(inputs, labels) + """ + + def __init__(self, network, loss_fn=None, optimizer=None): + super(AdversarialDefense, self).__init__(network) + network = check_model('network', network, Cell) + if loss_fn is None: + loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) + + if optimizer is None: + optimizer = Momentum( + params=network.trainable_params(), + learning_rate=0.01, + momentum=0.9) + + loss_net = WithLossCell(network, loss_fn) + self._train_net = TrainOneStepCell(loss_net, optimizer) + self._train_net.set_train() + + def defense(self, inputs, labels): + """ + Enhance model via training with input samples. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Labels of input samples. + + Returns: + numpy.ndarray, loss of defense operation. + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', + labels) + loss = self._train_net(Tensor(inputs), Tensor(labels)) + return loss.asnumpy() + + +class AdversarialDefenseWithAttacks(AdversarialDefense): + """ + Adversarial defense with attacks. + + Args: + network (Cell): A MindSpore network to be defensed. + attacks (list[Attack]): List of attack method. + loss_fn (Functions): Loss function. Default: None. + optimizer (Cell): Optimizer used to train the network. Default: None. + bounds (tuple): Upper and lower bounds of data. In form of (clip_min, + clip_max). Default: (0.0, 1.0). + replace_ratio (float): Ratio of replacing original samples with + adversarial, which must be between 0 and 1. Default: 0.5. + + Raises: + ValueError: If replace_ratio is not between 0 and 1. 
+ + Examples: + >>> net = Net() + >>> fgsm = FastGradientSignMethod(net) + >>> pgd = ProjectedGradientDescent(net) + >>> ead = AdversarialDefenseWithAttacks(net, [fgsm, pgd]) + >>> ead.defense(inputs, labels) + """ + + def __init__(self, network, attacks, loss_fn=None, optimizer=None, + bounds=(0.0, 1.0), replace_ratio=0.5): + super(AdversarialDefenseWithAttacks, self).__init__(network, + loss_fn, + optimizer) + self._attacks = check_param_type('attacks', attacks, list) + self._bounds = check_param_multi_types('bounds', bounds, [tuple, list]) + for elem in self._bounds: + _ = check_param_multi_types('bound', elem, [int, float]) + self._replace_ratio = check_param_in_range('replace_ratio', + replace_ratio, + 0, 1) + + def defense(self, inputs, labels): + """ + Enhance model via training with adversarial examples generated from input samples. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Labels of input samples. + + Returns: + numpy.ndarray, loss of adversarial defense operation. + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', + labels) + + x_len = inputs.shape[0] + n_adv = int(np.ceil(self._replace_ratio*x_len)) + n_adv_per_attack = int(n_adv / len(self._attacks)) + + adv_ids = np.random.choice(x_len, size=n_adv, replace=False) + start = 0 + for attack in self._attacks: + idx = adv_ids[start:start + n_adv_per_attack] + inputs[idx] = attack.generate(inputs[idx], labels[idx]) + start += n_adv_per_attack + + loss = self._train_net(Tensor(inputs), Tensor(labels)) + return loss.asnumpy() + + +EnsembleAdversarialDefense = AdversarialDefenseWithAttacks diff --git a/mindarmour/defenses/defense.py b/mindarmour/defenses/defense.py new file mode 100644 index 0000000..938b115 --- /dev/null +++ b/mindarmour/defenses/defense.py @@ -0,0 +1,86 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base Class of Defense. +""" +from abc import abstractmethod + +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_pair_numpy_param, \ + check_int_positive + +LOGGER = LogUtil.get_instance() +TAG = 'Defense' + + +class Defense: + """ + The abstract base class for all defense classes defending adversarial + examples. + + Args: + network (Cell): A MindSpore-style deep learning model to be defensed. + """ + + def __init__(self, network): + self._network = network + + @abstractmethod + def defense(self, inputs, labels): + """ + Defense model with samples. + + Args: + inputs (numpy.ndarray): Samples based on which adversarial + examples are generated. + labels (numpy.ndarray): Labels of input samples. + + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function defense() is an abstract function in class ' \ + '`Defense` and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + def batch_defense(self, inputs, labels, batch_size=32, epochs=5): + """ + Defense model with samples in batch. 
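+        Note that each epoch only uses int(len(inputs) / batch_size) full
+        batches; trailing samples that do not fill a complete batch are
+        skipped, and the loss returned is that of the last processed batch.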
+ + Args: + inputs (numpy.ndarray): Samples based on which adversarial + examples are generated. + labels (numpy.ndarray): Labels of input samples. + batch_size (int): Number of samples in one batch. + epochs (int): Number of epochs. + + Returns: + numpy.ndarray, loss of batch_defense operation. + + Raises: + ValueError: If batch_size is 0. + """ + inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', + labels) + x_len = len(inputs) + batch_size = check_int_positive('batch_size', batch_size) + + iters_per_epoch = int(x_len / batch_size) + loss = None + for _ in range(epochs): + for step in range(iters_per_epoch): + x_batch = inputs[step*batch_size:(step + 1)*batch_size] + y_batch = labels[step*batch_size:(step + 1)*batch_size] + loss = self.defense(x_batch, y_batch) + return loss diff --git a/mindarmour/defenses/natural_adversarial_defense.py b/mindarmour/defenses/natural_adversarial_defense.py new file mode 100644 index 0000000..f2c1132 --- /dev/null +++ b/mindarmour/defenses/natural_adversarial_defense.py @@ -0,0 +1,56 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Natural Adversarial Defense. +""" +from mindarmour.defenses.adversarial_defense import \ + AdversarialDefenseWithAttacks +from mindarmour.attacks.gradient_method import FastGradientSignMethod + + +class NaturalAdversarialDefense(AdversarialDefenseWithAttacks): + """ + Adversarial training based on FGSM. + + Reference: `A. Kurakin, et al., "Adversarial machine learning at scale," in + ICLR, 2017. `_ + + Args: + network (Cell): A MindSpore network to be defensed. + loss_fn (Functions): Loss function. Default: None. + optimizer (Cell): Optimizer used to train the network. Default: None. + bounds (tuple): Upper and lower bounds of data. In form of (clip_min, + clip_max). Default: (0.0, 1.0). + replace_ratio (float): Ratio of replacing original samples with + adversarial samples. Default: 0.5. + eps (float): Step size of the attack method(FGSM). Default: 0.1. + + Examples: + >>> net = Net() + >>> adv_defense = NaturalAdversarialDefense(net) + >>> adv_defense.defense(inputs, labels) + """ + def __init__(self, network, loss_fn=None, optimizer=None, + bounds=(0.0, 1.0), replace_ratio=0.5, eps=0.1): + attack = FastGradientSignMethod(network, + eps=eps, + alpha=None, + bounds=bounds) + super(NaturalAdversarialDefense, self).__init__( + network, + [attack], + loss_fn=loss_fn, + optimizer=optimizer, + bounds=bounds, + replace_ratio=replace_ratio) diff --git a/mindarmour/defenses/projected_adversarial_defense.py b/mindarmour/defenses/projected_adversarial_defense.py new file mode 100644 index 0000000..e39c0e6 --- /dev/null +++ b/mindarmour/defenses/projected_adversarial_defense.py @@ -0,0 +1,69 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Projected Adversarial Defense. +""" +from mindarmour.defenses.adversarial_defense import \ + AdversarialDefenseWithAttacks +from mindarmour.attacks.iterative_gradient_method import \ + ProjectedGradientDescent + + +class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks): + """ + Adversarial training based on PGD. + + Reference: `A. Madry, et al., "Towards deep learning models resistant to + adversarial attacks," in ICLR, 2018. `_ + + Args: + network (Cell): A MindSpore network to be defensed. + loss_fn (Functions): Loss function. Default: None. + optimizer (Cell): Optimizer used to train the nerwork. Default: None. + bounds (tuple): Upper and lower bounds of input data. In form of + (clip_min, clip_max). Default: (0.0, 1.0). + replace_ratio (float): Ratio of replacing original samples with + adversarial samples. Default: 0.5. + eps (float): PGD attack parameters, epsilon. Default: 0.3. + eps_iter (int): PGD attack parameters, inner loop epsilon. + Default:0.1. + nb_iter (int): PGD attack parameters, number of iteration. + Default: 5. + norm_level (str): Norm type. 'inf' or 'l2'. Default: 'inf'. + + Examples: + >>> net = Net() + >>> adv_defense = ProjectedAdversarialDefense(net) + >>> adv_defense.defense(inputs, labels) + """ + def __init__(self, + network, + loss_fn=None, + optimizer=None, + bounds=(0.0, 1.0), + replace_ratio=0.5, + eps=0.3, + eps_iter=0.1, + nb_iter=5, + norm_level='inf'): + attack = ProjectedGradientDescent(network, + eps=eps, + eps_iter=eps_iter, + nb_iter=nb_iter, + bounds=bounds, + norm_level=norm_level, + loss_fn=loss_fn) + super(ProjectedAdversarialDefense, self).__init__( + network, [attack], loss_fn=loss_fn, optimizer=optimizer, + bounds=bounds, replace_ratio=replace_ratio) diff --git a/mindarmour/detectors/__init__.py b/mindarmour/detectors/__init__.py new file mode 100644 index 0000000..86b8631 --- /dev/null +++ b/mindarmour/detectors/__init__.py @@ -0,0 +1,18 @@ +""" +This module includes detector methods on distinguishing adversarial examples +from benign examples. +""" +from .mag_net import ErrorBasedDetector +from .mag_net import DivergenceBasedDetector +from .ensemble_detector import EnsembleDetector +from .region_based_detector import RegionBasedDetector +from .spatial_smoothing import SpatialSmoothing +from . import black +from .black.similarity_detector import SimilarityDetector + +__all__ = ['ErrorBasedDetector', + 'DivergenceBasedDetector', + 'RegionBasedDetector', + 'SpatialSmoothing', + 'EnsembleDetector', + 'SimilarityDetector'] diff --git a/mindarmour/detectors/black/__init__.py b/mindarmour/detectors/black/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mindarmour/detectors/black/similarity_detector.py b/mindarmour/detectors/black/similarity_detector.py new file mode 100644 index 0000000..49a1bfb --- /dev/null +++ b/mindarmour/detectors/black/similarity_detector.py @@ -0,0 +1,284 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Similarity Detector. +""" +import itertools +import numpy as np + +from mindspore import Tensor +from mindspore import Model + +from mindarmour.detectors.detector import Detector +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_model, check_numpy_param, \ + check_int_positive, check_value_positive, check_param_type, \ + check_param_in_range + +LOGGER = LogUtil.get_instance() +TAG = 'SimilarityDetector' + + +def _pairwise_distances(x_input, y_input): + """ + Compute the Euclidean Distance matrix from a vector array x_input and + y_input. + + Args: + x_input (numpy.ndarray): input data, [n_samples_x, n_features] + y_input (numpy.ndarray): input data, [n_samples_y, n_features] + + Returns: + numpy.ndarray, distance matrix, [n_samples_a, n_samples_b] + """ + out = np.empty((x_input.shape[0], y_input.shape[0]), dtype='float') + iterator = itertools.product( + range(x_input.shape[0]), range(y_input.shape[0])) + for i, j in iterator: + out[i, j] = np.linalg.norm(x_input[i] - y_input[j]) + return out + + +class SimilarityDetector(Detector): + """ + The detector measures similarity among adjacent queries and rejects queries + which are remarkably similar to previous queries. + + Reference: `Stateful Detection of Black-Box Adversarial Attacks by Steven + Chen, Nicholas Carlini, and David Wagner. at arxiv 2019 + `_ + + Args: + trans_model (Model): A MindSpore model to encode input data into lower + dimension vector. + max_k_neighbor (int): The maximum number of the nearest neighbors. + Default: 1000. + chunk_size (int): Buffer size. Default: 1000. + max_buffer_size (int): Maximum buffer size. Default: 10000. + tuning (bool): Calculate the average distance for the nearest k + neighbours, if tuning is true, k=K. If False k=1,...,K. + Default: False. + fpr (float): False positive ratio on legitimate query sequences. + Default: 0.001 + + Examples: + >>> detector = SimilarityDetector(model) + >>> detector.fit(Tensor(ori), Tensor(labels)) + >>> adv_ids = detector.detect(Tensor(adv)) + """ + + def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000, + max_buffer_size=10000, tuning=False, fpr=0.001): + super(SimilarityDetector, self).__init__() + self._max_k_neighbor = check_int_positive('max_k_neighbor', + max_k_neighbor) + self._trans_model = check_model('trans_model', trans_model, Model) + self._tuning = check_param_type('tuning', tuning, bool) + self._chunk_size = check_int_positive('chunk_size', chunk_size) + self._max_buffer_size = check_int_positive('max_buffer_size', + max_buffer_size) + self._fpr = check_param_in_range('fpr', fpr, 0, 1) + self._num_of_neighbors = None + self._threshold = None + self._num_queries = 0 + # Stores recently processed queries + self._buffer = [] + # Tracks indexes of detected queries + self._detected_queries = [] + + def fit(self, inputs, labels=None): + """ + Process input training data to calculate the threshold. + A proper threshold should make sure the false positive + rate is under a given value. + + Args: + inputs (numpy.ndarray): Training data to calculate the threshold. 
+ labels (numpy.ndarray): Labels of training data. + + Returns: + - list[int], number of the nearest neighbors. + + - list[float], calculated thresholds for different K. + + Raises: + ValueError: The number of training data is less than + max_k_neighbor! + """ + data = check_numpy_param('inputs', inputs) + data_len = data.shape[0] + if data_len < self._max_k_neighbor: + raise ValueError('The number of training data must be larger than ' + 'max_k_neighbor!') + data = self._trans_model.predict(Tensor(data)).asnumpy() + data = data.reshape((data.shape[0], -1)) + distances = [] + for i in range(data.shape[0] // self._chunk_size): + distance_mat = _pairwise_distances( + x_input=data[i*self._chunk_size:(i + 1)*self._chunk_size, :], + y_input=data) + distance_mat = np.sort(distance_mat, axis=-1) + distances.append(distance_mat[:, :self._max_k_neighbor]) + # the rest + distance_mat = _pairwise_distances(x_input=data[(data.shape[0] // + self._chunk_size)* + self._chunk_size:, :], + y_input=data) + distance_mat = np.sort(distance_mat, axis=-1) + distances.append(distance_mat[:, :self._max_k_neighbor]) + + distance_matrix = np.concatenate(distances, axis=0) + + start = 1 if self._tuning else self._max_k_neighbor + + thresholds = [] + num_nearest_neighbors = [] + for k in range(start, self._max_k_neighbor + 1): + avg_dist = distance_matrix[:, :k].mean(axis=-1) + index = int(len(avg_dist)*self._fpr) + threshold = np.sort(avg_dist, axis=None)[index] + num_nearest_neighbors.append(k) + thresholds.append(threshold) + if thresholds: + self._threshold = thresholds[-1] + self._num_of_neighbors = num_nearest_neighbors[-1] + return num_nearest_neighbors, thresholds + + def detect(self, inputs): + """ + Process queries to detect black-box attack. + + Args: + inputs (numpy.ndarray): Query sequence. + + Raises: + ValueError: The parameters of threshold or num_of_neighbors is + not available. + """ + if self._threshold is None or self._num_of_neighbors is None: + msg = 'Explicit detection threshold and number of nearest ' \ + 'neighbors must be provided using set_threshold(), ' \ + 'or call fit() to calculate.' + LOGGER.error(TAG, msg) + raise ValueError(msg) + queries = check_numpy_param('inputs', inputs) + queries = self._trans_model.predict(Tensor(queries)).asnumpy() + queries = queries.reshape((queries.shape[0], -1)) + for query in queries: + self._process_query(query) + + def _process_query(self, query): + """ + Process each query to detect black-box attack. + + Args: + query (numpy.ndarray): Query input. + """ + if len(self._buffer) < self._num_of_neighbors: + self._buffer.append(query) + self._num_queries += 1 + return + k = self._num_of_neighbors + + if self._buffer: + queries = np.stack(self._buffer, axis=0) + dists = np.linalg.norm(queries - query, axis=-1) + + k_nearest_dists = np.partition(dists, k - 1)[:k, None] + k_avg_dist = np.mean(k_nearest_dists) + + self._buffer.append(query) + self._num_queries += 1 + + if len(self._buffer) >= self._max_buffer_size: + self.clear_buffer() + + # an attack is detected + if k_avg_dist < self._threshold: + self._detected_queries.append(self._num_queries) + self.clear_buffer() + + def clear_buffer(self): + """ + Clear the buffer memory. + + """ + while self._buffer: + self._buffer.pop() + + def set_threshold(self, num_of_neighbors, threshold): + """ + Set the parameters num_of_neighbors and threshold. + + Args: + num_of_neighbors (int): Number of the nearest neighbors. + threshold (float): Detection threshold. Default: None. 
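+
+        Examples:
+            >>> # Illustrative values only; in practice use a
+            >>> # (num_of_neighbors, threshold) pair taken from the lists
+            >>> # returned by fit().
+            >>> detector.set_threshold(50, 0.5)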
+ """ + self._num_of_neighbors = check_int_positive('num_of_neighbors', + num_of_neighbors) + self._threshold = check_value_positive('threshold', threshold) + + def get_detection_interval(self): + """ + Get the interval between adjacent detections. + + Returns: + list[int], number of queries between adjacent detections. + """ + detected_queries = self._detected_queries + interval = [] + for i in range(len(detected_queries) - 1): + interval.append(detected_queries[i + 1] - detected_queries[i]) + return interval + + def get_detected_queries(self): + """ + Get the indexes of detected queries. + + Returns: + list[int], sequence number of detected malicious queries. + """ + detected_queries = self._detected_queries + return detected_queries + + def detect_diff(self, inputs): + """ + Detect adversarial samples from input samples, like the predict_proba + function in common machine learning model. + + Args: + inputs (Union[numpy.ndarray, list, tuple]): Data been used as + references to create adversarial examples. + + Raises: + NotImplementedError: This function is not available + in class `SimilarityDetector`. + """ + msg = 'The function detect_diff() is not available in the class ' \ + '`SimilarityDetector`.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + def transform(self, inputs): + """ + Filter adversarial noises in input samples. + + Raises: + NotImplementedError: This function is not available + in class `SimilarityDetector`. + """ + msg = 'The function transform() is not available in the class ' \ + '`SimilarityDetector`.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) diff --git a/mindarmour/detectors/detector.py b/mindarmour/detectors/detector.py new file mode 100644 index 0000000..2a75c56 --- /dev/null +++ b/mindarmour/detectors/detector.py @@ -0,0 +1,101 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base Class of Detector. +""" +from abc import abstractmethod + +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'Detector' + + +class Detector: + """ + The abstract base class for all adversarial example detectors. + """ + def __init__(self): + pass + + + @abstractmethod + def fit(self, inputs, labels=None): + """ + Fit a threshold and refuse adversarial examples whose difference from + their denoised versions are larger than the threshold. The threshold is + determined by a certain false positive rate when applying to normal samples. + + Args: + inputs (numpy.ndarray): The input samples to calculate the threshold. + labels (numpy.ndarray): Labels of training data. + + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function fit() is an abstract function in class ' \ + '`Detector` and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + @abstractmethod + def detect(self, inputs): + """ + Detect adversarial examples from input samples. 
+ + Args: + inputs (Union[numpy.ndarray, list, tuple]): The input samples to be + detected. + + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function detect() is an abstract function in class ' \ + '`Detector` and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + @abstractmethod + def detect_diff(self, inputs): + """ + Calculate the difference between the input samples and de-noised samples. + + Args: + inputs (Union[numpy.ndarray, list, tuple]): The input samples to be + detected. + + Raises: + NotImplementedError: It is an abstract method. + + """ + msg = 'The function detect_diff() is an abstract function in class ' \ + '`Detector` and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + @abstractmethod + def transform(self, inputs): + """ + Filter adversarial noises in input samples. + + Args: + inputs (Union[numpy.ndarray, list, tuple]): The input samples to be + transformed. + Raises: + NotImplementedError: It is an abstract method. + """ + msg = 'The function transform() is an abstract function in class ' \ + '`Detector` and should be implemented in child class.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) diff --git a/mindarmour/detectors/ensemble_detector.py b/mindarmour/detectors/ensemble_detector.py new file mode 100644 index 0000000..f1e8ba8 --- /dev/null +++ b/mindarmour/detectors/ensemble_detector.py @@ -0,0 +1,126 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Ensemble Detector. +""" +import numpy as np + +from mindarmour.detectors.detector import Detector +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_numpy_param, \ + check_param_multi_types + + +LOGGER = LogUtil.get_instance() +TAG = 'EnsembleDetector' + + +class EnsembleDetector(Detector): + """ + Ensemble detector. + + Args: + detectors (Union[tuple, list]): List of detector methods. + policy (str): Decision policy, could be 'vote', 'all' or 'any'. + Default: 'vote' + """ + + def __init__(self, detectors, policy="vote"): + super(EnsembleDetector, self).__init__() + self._detectors = check_param_multi_types('detectors', detectors, + [list, tuple]) + self._num_detectors = len(detectors) + self._policy = policy + + def fit(self, inputs, labels=None): + """ + Fit detector like a machine learning model. This method is not available + in this class. + + Args: + inputs (numpy.ndarray): Data to calculate the threshold. + labels (numpy.ndarray): Labels of data. + + Raises: + NotImplementedError: This function is not available in ensemble. + """ + msg = 'The function fit() is not available in the class ' \ + '`EnsembleDetector`.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + def detect(self, inputs): + """ + Detect adversarial examples from input samples. + + Args: + inputs (numpy.ndarray): Input samples. + + Returns: + list[int], whether a sample is adversarial. 
if res[i]=1, then the + input sample with index i is adversarial. + + Raises: + ValueError: If policy is not supported. + """ + + inputs = check_numpy_param('inputs', inputs) + x_len = inputs.shape[0] + counts = np.zeros(x_len) + res = np.zeros(x_len, dtype=np.int) + for detector in list(self._detectors): + idx = detector.detect(inputs) + counts[idx] += 1 + + if self._policy == "vote": + idx_adv = np.argwhere(counts > self._num_detectors / 2) + elif self._policy == "all": + idx_adv = np.argwhere(counts == self._num_detectors) + elif self._policy == "any": + idx_adv = np.argwhere(counts > 0) + else: + msg = 'Policy {} is not supported.'.format(self._policy) + LOGGER.error(TAG, msg) + raise ValueError(msg) + res[idx_adv] = 1 + return list(res) + + def detect_diff(self, inputs): + """ + This method is not available in this class. + + Args: + inputs (Union[numpy.ndarray, list, tuple]): Data been used as + references to create adversarial examples. + + Raises: + NotImplementedError: This function is not available in ensemble. + """ + msg = 'The function detect_diff() is not available in the class ' \ + '`EnsembleDetector`.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + + def transform(self, inputs): + """ + Filter adversarial noises in input samples. + This method is not available in this class. + + Raises: + NotImplementedError: This function is not available in ensemble. + """ + msg = 'The function transform() is not available in the class ' \ + '`EnsembleDetector`.' + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) diff --git a/mindarmour/detectors/mag_net.py b/mindarmour/detectors/mag_net.py new file mode 100644 index 0000000..27abec6 --- /dev/null +++ b/mindarmour/detectors/mag_net.py @@ -0,0 +1,228 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Error-Based detector. +""" +import numpy as np +from scipy import stats +from scipy.special import softmax + +from mindspore import Tensor +from mindspore import Model + +from mindarmour.detectors.detector import Detector +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_numpy_param, check_model, \ + check_param_in_range, check_param_multi_types, check_int_positive, \ + check_value_positive + +LOGGER = LogUtil.get_instance() +TAG = 'MagNet' + + +class ErrorBasedDetector(Detector): + """ + The detector reconstructs input samples, measures reconstruction errors and + rejects samples with large reconstruction errors. + + Reference: `MagNet: a Two-Pronged Defense against Adversarial Examples, + by Dongyu Meng and Hao Chen, at CCS 2017. + `_ + + Args: + auto_encoder (Model): An (trained) auto encoder which + represents the input by reduced encoding. + false_positive_rate (float): Detector's false positive rate. + Default: 0.01. + bounds (tuple): (clip_min, clip_max). Default: (0.0, 1.0). 
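+
+    Examples:
+        >>> # Usage sketch: `auto_encoder` is assumed to be a trained
+        >>> # MindSpore Model wrapping an autoencoder, and `benign`/`samples`
+        >>> # are illustrative numpy arrays shaped like its input.
+        >>> detector = ErrorBasedDetector(auto_encoder)
+        >>> detector.fit(benign)
+        >>> flags = detector.detect(samples)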
+ + """ + + def __init__(self, auto_encoder, false_positive_rate=0.01, + bounds=(0.0, 1.0)): + super(ErrorBasedDetector, self).__init__() + self._auto_encoder = check_model('auto_encoder', auto_encoder, Model) + self._false_positive_rate = check_param_in_range('false_positive_rate', + false_positive_rate, + 0, 1) + self._threshold = 0.0 + self._bounds = check_param_multi_types('bounds', bounds, [list, tuple]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + + def fit(self, inputs, labels=None): + """ + Find a threshold for a given dataset to distinguish adversarial examples. + + Args: + inputs (numpy.ndarray): Input samples. + labels (numpy.ndarray): Labels of input samples. Default: None. + + Returns: + float, threshold to distinguish adversarial samples from benign ones. + """ + inputs = check_numpy_param('inputs', inputs) + + marks = self.detect_diff(inputs) + num = int(inputs.shape[0]*self._false_positive_rate) + marks = np.sort(marks) + if num <= len(marks): + self._threshold = marks[-num] + return self._threshold + + def detect(self, inputs): + """ + Detect if input samples are adversarial or not. + + Args: + inputs (numpy.ndarray): Suspicious samples to be judged. + + Returns: + list[int], whether a sample is adversarial. if res[i]=1, then the + input sample with index i is adversarial. + """ + inputs = check_numpy_param('inputs', inputs) + dist = self.detect_diff(inputs) + res = [0]*len(dist) + for i, elem in enumerate(dist): + if elem > self._threshold: + res[i] = 1 + return res + + def detect_diff(self, inputs): + """ + Detect the distance between the original samples and reconstructed samples. + + Args: + inputs (numpy.ndarray): Input samples. + + Returns: + float, the distance between reconstructed and original samples. + """ + inputs = check_numpy_param('inputs', inputs) + x_trans = self._auto_encoder.predict(Tensor(inputs)) + diff = np.abs(inputs - x_trans.asnumpy()) + dims = tuple(np.arange(len(inputs.shape))[1:]) + marks = np.mean(np.power(diff, 2), axis=dims) + return marks + + def transform(self, inputs): + """ + Reconstruct input samples. + + Args: + inputs (numpy.ndarray): Input samples. + + Returns: + numpy.ndarray, reconstructed images. + """ + inputs = check_numpy_param('inputs', inputs) + x_trans = self._auto_encoder.predict(Tensor(inputs)) + if self._bounds is not None: + clip_min, clip_max = self._bounds + x_trans = np.clip(x_trans.asnumpy(), clip_min, clip_max) + return x_trans + + def set_threshold(self, threshold): + """ + Set the parameters threshold. + + Args: + threshold (float): Detection threshold. Default: None. + """ + self._threshold = check_value_positive('threshold', threshold) + + +class DivergenceBasedDetector(ErrorBasedDetector): + """ + This class implement a divergence-based detector. + + Reference: `MagNet: a Two-Pronged Defense against Adversarial Examples, + by Dongyu Meng and Hao Chen, at CCS 2017. + `_ + + Args: + auto_encoder (Model): Encoder model. + model (Model): Targeted model. + option (str): Method used to calculate Divergence. Default: "jsd". + t (int): Temperature used to overcome numerical problem. Default: 1. + bounds (tuple): Upper and lower bounds of data. + In form of (clip_min, clip_max). Default: (0.0, 1.0). 
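+
+    Examples:
+        >>> # Usage sketch: `auto_encoder` and `model` are assumed to be
+        >>> # trained MindSpore Model instances; `benign` and `samples` are
+        >>> # illustrative numpy arrays shaped like the model input.
+        >>> detector = DivergenceBasedDetector(auto_encoder, model)
+        >>> detector.fit(benign)
+        >>> flags = detector.detect(samples)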
+ """ + + def __init__(self, auto_encoder, model, option="jsd", + t=1, bounds=(0.0, 1.0)): + super(DivergenceBasedDetector, self).__init__(auto_encoder, + bounds=bounds) + self._auto_encoder = auto_encoder + self._model = check_model('targeted model', model, Model) + self._threshold = 0.0 + self._option = option + self._t = check_int_positive('t', t) + self._bounds = check_param_multi_types('bounds', bounds, [tuple, list]) + for b in self._bounds: + _ = check_param_multi_types('bound', b, [int, float]) + + def detect_diff(self, inputs): + """ + Detect the distance between original samples and reconstructed samples. + + The distance is calculated by JSD. + + Args: + inputs (numpy.ndarray): Input samples. + + Returns: + float, the distance. + + Raises: + NotImplementedError: If the param `option` is not supported. + """ + inputs = check_numpy_param('inputs', inputs) + x_len = inputs.shape[0] + x_transformed = self._auto_encoder.predict(Tensor(inputs)) + x_origin = self._model.predict(Tensor(inputs)) + x_trans = self._model.predict(x_transformed) + + y_pred = softmax(x_origin.asnumpy() / self._t, axis=1) + y_trans_pred = softmax(x_trans.asnumpy() / self._t, axis=1) + + if self._option == 'jsd': + marks = [_jsd(y_pred[i], y_trans_pred[i]) for i in range(x_len)] + else: + msg = '{} is not implemented.'.format(self._option) + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + return np.array(marks) + + +def _jsd(prob_dist_p, prob_dist_q): + """ + Compute the Jensen-Shannon Divergence between two probability distributions + with equal weights. + + Args: + prob_dist_p (numpy.ndarray): Probability distribution p. + prob_dist_q (numpy.ndarray): Probability distribution q. + + Returns: + float, the Jensen-Shannon Divergence. + """ + prob_dist_p = check_numpy_param('prob_dist_p', prob_dist_p) + prob_dist_q = check_numpy_param('prob_dist_q', prob_dist_q) + norm_dist_p = prob_dist_p / (np.linalg.norm(prob_dist_p, ord=1) + 1e-12) + norm_dist_q = prob_dist_q / (np.linalg.norm(prob_dist_q, ord=1) + 1e-12) + norm_mean = 0.5*(norm_dist_p + norm_dist_q) + return 0.5*(stats.entropy(norm_dist_p, norm_mean) + + stats.entropy(norm_dist_q, norm_mean)) diff --git a/mindarmour/detectors/region_based_detector.py b/mindarmour/detectors/region_based_detector.py new file mode 100644 index 0000000..24c6f27 --- /dev/null +++ b/mindarmour/detectors/region_based_detector.py @@ -0,0 +1,235 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Region-Based detector +""" +import time + +import numpy as np + +from mindspore import Model +from mindspore import Tensor + +from mindarmour.detectors.detector import Detector +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_numpy_param, check_param_type, \ + check_pair_numpy_param, check_model, check_int_positive, \ + check_value_positive, check_value_non_negative, check_param_in_range, \ + check_equal_shape + +LOGGER = LogUtil.get_instance() +TAG = 'RegionBasedDetector' + + +class RegionBasedDetector(Detector): + """ + This class implement a region-based detector. + + Reference: `Mitigating evasion attacks to deep neural networks via + region-based classification `_ + + Args: + model (Model): Target model. + number_points (int): The number of samples generate from the + hyper cube of original sample. Default: 10. + initial_radius (float): Initial radius of hyper cube. Default: 0.0. + max_radius (float): Maximum radius of hyper cube. Default: 1.0. + search_step (float): Incremental during search of radius. Default: 0.01. + degrade_limit (float): Acceptable decrease of classification accuracy. + Default: 0.0. + sparse (bool): If True, input labels are sparse-encoded. If False, + input labels are one-hot-encoded. Default: False. + + Examples: + >>> detector = RegionBasedDetector(model) + >>> detector.fit(Tensor(ori), Tensor(labels)) + >>> adv_ids = detector.detect(Tensor(adv)) + """ + + def __init__(self, model, number_points=10, initial_radius=0.0, + max_radius=1.0, search_step=0.01, degrade_limit=0.0, + sparse=False): + super(RegionBasedDetector, self).__init__() + self._model = check_model('targeted model', model, Model) + self._number_points = check_int_positive('number_points', number_points) + self._initial_radius = check_value_non_negative('initial_radius', + initial_radius) + self._max_radius = check_value_positive('max_radius', max_radius) + self._search_step = check_value_positive('search_step', search_step) + self._degrade_limit = check_value_non_negative('degrade_limit', + degrade_limit) + self._sparse = check_param_type('sparse', sparse, bool) + self._radius = None + + def set_radius(self, radius): + """Set radius.""" + + self._radius = check_param_in_range('radius', radius, + self._initial_radius, + self._max_radius) + + def fit(self, inputs, labels=None): + """ + Train detector to decide the best radius. + + Args: + inputs (numpy.ndarray): Benign samples. + labels (numpy.ndarray): Ground truth labels of the input samples. + Default:None. + + Returns: + float, the best radius. 
+ """ + inputs, labels = check_pair_numpy_param('inputs', inputs, + 'labels', labels) + LOGGER.debug(TAG, 'enter fit() function.') + time_start = time.time() + search_iters = (self._max_radius + - self._initial_radius) / self._search_step + search_iters = np.round(search_iters).astype(int) + radius = self._initial_radius + pred = self._model.predict(Tensor(inputs)) + raw_preds = np.argmax(pred.asnumpy(), axis=1) + if not self._sparse: + labels = np.argmax(labels, axis=1) + raw_preds, labels = check_equal_shape('raw_preds', raw_preds, 'labels', + labels) + raw_acc = np.sum(raw_preds == labels) / inputs.shape[0] + + for _ in range(search_iters): + rc_preds = self._rc_forward(inputs, radius) + rc_preds, labels = check_equal_shape('rc_preds', rc_preds, 'labels', + labels) + def_acc = np.sum(rc_preds == labels) / inputs.shape[0] + if def_acc >= raw_acc - self._degrade_limit: + radius += self._search_step + continue + break + + self._radius = radius - self._search_step + LOGGER.debug(TAG, 'best radius is: %s', self._radius) + LOGGER.debug(TAG, + 'time used to train detector of %d samples is: %s seconds', + inputs.shape[0], + time.time() - time_start) + return self._radius + + def _generate_hyper_cube(self, inputs, radius): + """ + Generate random samples in the hyper cubes around input samples. + + Args: + inputs (numpy.ndarray): Input samples. + radius (float): The scope to generate hyper cubes around input samples. + + Returns: + numpy.ndarray, randomly chosen samples in the hyper cubes. + """ + LOGGER.debug(TAG, 'enter _generate_hyper_cube().') + res = [] + for _ in range(self._number_points): + res.append(np.clip((inputs + np.random.uniform( + -radius, radius, len(inputs))), 0.0, 1.0).astype(inputs.dtype)) + return np.asarray(res) + + def _rc_forward(self, inputs, radius): + """ + Generate region-based predictions for input samples. + + Args: + inputs (numpy.ndarray): Input samples. + radius (float): The scope to generate hyper cubes around input samples. + + Returns: + numpy.ndarray, classification result for input samples. + """ + LOGGER.debug(TAG, 'enter _rc_forward().') + res = [] + for _, elem in enumerate(inputs): + hyper_cube_x = self._generate_hyper_cube(elem, radius) + hyper_cube_preds = [] + for ite_hyper_cube_x in hyper_cube_x: + model_inputs = Tensor(np.expand_dims(ite_hyper_cube_x, axis=0)) + ite_preds = self._model.predict(model_inputs).asnumpy()[0] + hyper_cube_preds.append(ite_preds) + pred_labels = np.argmax(hyper_cube_preds, axis=1) + bin_count = np.bincount(pred_labels) + # count the number of different class and choose the max one + # as final class + hyper_cube_tag = np.argmax(bin_count, axis=0) + res.append(hyper_cube_tag) + return np.asarray(res) + + def detect(self, inputs): + """ + Tell whether input samples are adversarial or not. + + Args: + inputs (numpy.ndarray): Suspicious samples to be judged. + + Returns: + list[int], whether a sample is adversarial. if res[i]=1, then the + input sample with index i is adversarial. 
+ """ + LOGGER.debug(TAG, 'enter detect().') + self._radius = check_param_type('radius', self._radius, float) + inputs = check_numpy_param('inputs', inputs) + time_start = time.time() + res = [1]*inputs.shape[0] + raw_preds = np.argmax(self._model.predict(Tensor(inputs)).asnumpy(), + axis=1) + rc_preds = self._rc_forward(inputs, self._radius) + for i in range(inputs.shape[0]): + if raw_preds[i] == rc_preds[i]: + res[i] = 0 + LOGGER.debug(TAG, + 'time used to detect %d samples is : %s seconds', + inputs.shape[0], + time.time() - time_start) + return res + + def detect_diff(self, inputs): + """ + Return raw prediction results and region-based prediction results. + + Args: + inputs (numpy.ndarray): Input samples. + + Returns: + numpy.ndarray, raw prediction results and region-based prediction results of input samples. + """ + LOGGER.debug(TAG, 'enter detect_diff().') + inputs = check_numpy_param('inputs', inputs) + + raw_preds = self._model.predict(Tensor(inputs)) + rc_preds = self._rc_forward(inputs, self._radius) + + return raw_preds.asnumpy(), rc_preds + + def transform(self, inputs): + """ + Generate hyper cube for input samples. + + Args: + inputs (numpy.ndarray): Input samples. + + Returns: + numpy.ndarray, hyper cube corresponds to every sample. + """ + LOGGER.debug(TAG, 'enter transform().') + inputs = check_numpy_param('inputs', inputs) + res = [] + for _, elem in enumerate(inputs): + res.append(self._generate_hyper_cube(elem, self._radius)) + return np.asarray(res) diff --git a/mindarmour/detectors/spatial_smoothing.py b/mindarmour/detectors/spatial_smoothing.py new file mode 100644 index 0000000..045b676 --- /dev/null +++ b/mindarmour/detectors/spatial_smoothing.py @@ -0,0 +1,171 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Spatial-Smoothing detector. +""" +import numpy as np +from scipy import ndimage + +from mindspore import Model +from mindspore import Tensor + +from mindarmour.detectors.detector import Detector +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_model, check_numpy_param, \ + check_pair_numpy_param, check_int_positive, check_param_type, \ + check_param_in_range, check_equal_shape, check_value_positive + +LOGGER = LogUtil.get_instance() +TAG = 'SpatialSmoothing' + + +def _median_filter_np(inputs, size=2): + """median filter using numpy""" + return ndimage.filters.median_filter(inputs, size=size, mode='reflect') + + +class SpatialSmoothing(Detector): + """ + Detect method based on spatial smoothing. + + Args: + model (Model): Target model. + ksize (int): Smooth window size. Default: 3. + is_local_smooth (bool): If True, trigger local smooth. If False, none + local smooth. Default: True. + metric (str): Distance method. Default: 'l1'. + false_positive_ratio (float): False positive rate over + benign samples. Default: 0.05. 
+ + Examples: + >>> detector = SpatialSmoothing(model) + >>> detector.fit(Tensor(ori), Tensor(labels)) + >>> adv_ids = detector.detect(Tensor(adv)) + """ + + def __init__(self, model, ksize=3, is_local_smooth=True, + metric='l1', false_positive_ratio=0.05): + super(SpatialSmoothing, self).__init__() + self._ksize = check_int_positive('ksize', ksize) + self._is_local_smooth = check_param_type('is_local_smooth', + is_local_smooth, + bool) + self._model = check_model('model', model, Model) + self._metric = metric + self._fpr = check_param_in_range('false_positive_ratio', + false_positive_ratio, + 0, 1) + self._threshold = None + + def fit(self, inputs, labels=None): + """ + Train detector to decide the threshold. The proper threshold make + sure the actual false positive rate over benign sample is less than + the given value. + + Args: + inputs (numpy.ndarray): Benign samples. + labels (numpy.ndarray): Default None. + + Returns: + float, threshold, distance larger than which is reported + as positive, i.e. adversarial. + """ + inputs = check_numpy_param('inputs', inputs) + raw_pred = self._model.predict(Tensor(inputs)) + smoothing_pred = self._model.predict(Tensor(self.transform(inputs))) + + dist = self._dist(raw_pred.asnumpy(), smoothing_pred.asnumpy()) + index = int(len(dist)*(1 - self._fpr)) + threshold = np.sort(dist, axis=None)[index] + self._threshold = threshold + return self._threshold + + def detect(self, inputs): + """ + Detect if an input sample is an adversarial example. + + Args: + inputs (numpy.ndarray): Suspicious samples to be judged. + + Returns: + list[int], whether a sample is adversarial. if res[i]=1, then the + input sample with index i is adversarial. + """ + inputs = check_numpy_param('inputs', inputs) + raw_pred = self._model.predict(Tensor(inputs)) + smoothing_pred = self._model.predict(Tensor(self.transform(inputs))) + dist = self._dist(raw_pred.asnumpy(), smoothing_pred.asnumpy()) + + res = [0]*len(dist) + for i, elem in enumerate(dist): + if elem > self._threshold: + res[i] = 1 + + return res + + def detect_diff(self, inputs): + """ + Return the raw distance value (before apply the threshold) between + the input sample and its smoothed counterpart. + + Args: + inputs (numpy.ndarray): Suspicious samples to be judged. + + Returns: + float, distance. + """ + inputs = check_numpy_param('inputs', inputs) + raw_pred = self._model.predict(Tensor(inputs)) + smoothing_pred = self._model.predict(Tensor(self.transform(inputs))) + dist = self._dist(raw_pred.asnumpy(), smoothing_pred.asnumpy()) + return dist + + def transform(self, inputs): + inputs = check_numpy_param('inputs', inputs) + return _median_filter_np(inputs, self._ksize) + + def set_threshold(self, threshold): + """ + Set the parameters threshold. + + Args: + threshold (float): Detection threshold. Default: None. + """ + self._threshold = check_value_positive('threshold', threshold) + + def _dist(self, before, after): + """ + Calculate the distance between the model outputs of a raw sample and + its smoothed counterpart. + + Args: + before (numpy.ndarray): Model output of raw samples. + after (numpy.ndarray): Model output of smoothed counterparts. + + Returns: + float, distance based on specified norm. 
+ """ + before, after = check_pair_numpy_param('before', before, 'after', after) + before, after = check_equal_shape('before', before, 'after', after) + res = [] + diff = after - before + for _, elem in enumerate(diff): + if self._metric == 'l1': + res.append(np.linalg.norm(elem, ord=1)) + elif self._metric == 'l2': + res.append(np.linalg.norm(elem, ord=2)) + else: + res.append(np.linalg.norm(elem, ord=1)) + return res diff --git a/mindarmour/evaluations/__init__.py b/mindarmour/evaluations/__init__.py new file mode 100644 index 0000000..ebd1d8d --- /dev/null +++ b/mindarmour/evaluations/__init__.py @@ -0,0 +1,14 @@ +""" +This module includes various metrics to evaluate the result of attacks or +defenses. +""" +from .attack_evaluation import AttackEvaluate +from .defense_evaluation import DefenseEvaluate +from .visual_metrics import RadarMetric +from . import black +from .black.defense_evaluation import BlackDefenseEvaluate + +__all__ = ['AttackEvaluate', + 'BlackDefenseEvaluate', + 'DefenseEvaluate', + 'RadarMetric'] diff --git a/mindarmour/evaluations/attack_evaluation.py b/mindarmour/evaluations/attack_evaluation.py new file mode 100644 index 0000000..b828dde --- /dev/null +++ b/mindarmour/evaluations/attack_evaluation.py @@ -0,0 +1,275 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Attack evaluation. +""" + +import numpy as np + +from scipy.ndimage.filters import convolve + +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_pair_numpy_param, \ + check_param_type, check_numpy_param, check_equal_shape + +LOGGER = LogUtil.get_instance() +TAG = 'AttackEvaluate' + + +def _compute_ssim(img_1, img_2, kernel_sigma=1.5, kernel_width=11): + """ + compute structural similarity. + Args: + img_1 (numpy.ndarray): The first image to be compared. + img_2 (numpy.ndarray): The second image to be compared. + kernel_sigma (float): Gassian kernel param. Default: 1.5. + kernel_width (int): Another Gassian kernel param. Default: 11. + + Returns: + float, structural similarity. 
+ """ + img_1, img_2 = check_equal_shape('images_1', img_1, 'images_2', img_2) + + if len(img_1.shape) > 2: + total_ssim = 0 + for i in range(img_1.shape[2]): + total_ssim += _compute_ssim(img_1[:, :, i], img_2[:, :, i]) + return total_ssim / 3 + + # Create gaussian kernel + gaussian_kernel = np.zeros((kernel_width, kernel_width)) + for i in range(kernel_width): + for j in range(kernel_width): + gaussian_kernel[i, j] = (1 / (2*np.pi*(kernel_sigma**2)))*np.exp( + - (((i - 5)**2) + ((j - 5)**2)) / (2*(kernel_sigma**2))) + + img_1 = img_1.astype(np.float32) + img_2 = img_2.astype(np.float32) + + img_sq_1 = img_1**2 + img_sq_2 = img_2**2 + img_12 = img_1*img_2 + + # Mean + img_mu_1 = convolve(img_1, gaussian_kernel) + img_mu_2 = convolve(img_2, gaussian_kernel) + + # Mean square + img_mu_sq_1 = img_mu_1**2 + img_mu_sq_2 = img_mu_2**2 + img_mu_12 = img_mu_1*img_mu_2 + + # Variances + img_sigma_sq_1 = convolve(img_sq_1, gaussian_kernel) + img_sigma_sq_2 = convolve(img_sq_2, gaussian_kernel) + + # Covariance + img_sigma_12 = convolve(img_12, gaussian_kernel) + + # Centered squares of variances + img_sigma_sq_1 = img_sigma_sq_1 - img_mu_sq_1 + img_sigma_sq_2 = img_sigma_sq_2 - img_mu_sq_2 + img_sigma_12 = img_sigma_12 - img_mu_12 + + k_1 = 0.01 + k_2 = 0.03 + c_1 = (k_1*255)**2 + c_2 = (k_2*255)**2 + + # Calculate ssim + num_ssim = (2*img_mu_12 + c_1)*(2*img_sigma_12 + c_2) + den_ssim = (img_mu_sq_1 + img_mu_sq_2 + c_1)*(img_sigma_sq_1 + + img_sigma_sq_2 + c_2) + res = np.average(num_ssim / den_ssim) + return res + + +class AttackEvaluate: + """ + Evaluation metrics of attack methods. + + Args: + inputs (numpy.ndarray): Original samples. + labels (numpy.ndarray): Original samples' label by one-hot format. + adv_inputs (numpy.ndarray): Adversarial samples generated from original + samples. + adv_preds (numpy.ndarray): Probability of all output classes of + adversarial examples. + targeted (bool): If True, it is a targeted attack. If False, it is an + untargeted attack. Default: False. + target_label (numpy.ndarray): Targeted classes of adversarial examples, + which is one dimension whose size is adv_inputs.shape[0]. + Default: None. + + Raises: + ValueError: If target_label is None when targeted is True. + + Examples: + >>> x = np.random.normal(size=(3, 512, 512, 3)) + >>> adv_x = np.random.normal(size=(3, 512, 512, 3)) + >>> y = np.array([[0.1, 0.1, 0.2, 0.6], + >>> [0.1, 0.7, 0.0, 0.2], + >>> [0.8, 0.1, 0.0, 0.1]]) + >>> adv_y = np.array([[0.1, 0.1, 0.2, 0.6], + >>> [0.1, 0.0, 0.8, 0.1], + >>> [0.0, 0.9, 0.1, 0.0]]) + >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) + >>> mr = attack_eval.mis_classification_rate() + """ + + def __init__(self, inputs, labels, adv_inputs, adv_preds, + targeted=False, target_label=None): + self._inputs, self._labels = check_pair_numpy_param('inputs', + inputs, + 'labels', + labels) + self._adv_inputs, self._adv_preds = check_pair_numpy_param('adv_inputs', + adv_inputs, + 'adv_preds', + adv_preds) + targeted = check_param_type('targeted', targeted, bool) + self._targeted = targeted + if target_label is not None: + target_label = check_numpy_param('target_label', target_label) + self._target_label = target_label + self._true_label = np.argmax(self._labels, axis=1) + self._adv_label = np.argmax(self._adv_preds, axis=1) + + idxes = np.arange(self._adv_preds.shape[0]) + if self._targeted: + if target_label is None: + msg = 'targeted attack need target_label, but got None.' 
+ LOGGER.error(TAG, msg) + raise ValueError(msg) + self._adv_preds, self._target_label = check_pair_numpy_param('adv_pred', + self._adv_preds, + 'target_label', + target_label) + self._success_idxes = idxes[self._adv_label == self._target_label] + else: + self._success_idxes = idxes[self._adv_label != self._true_label] + + def mis_classification_rate(self): + """ + Calculate misclassification rate(MR). + + Returns: + float, ranges between (0, 1). The higher, the more successful the attack is. + """ + return self._success_idxes.shape[0]*1.0 / self._inputs.shape[0] + + def avg_conf_adv_class(self): + """ + Calculate average confidence of adversarial class (ACAC). + + Returns: + float, ranges between (0, 1). The higher, the more successful the attack is. + """ + idxes = self._success_idxes + success_num = idxes.shape[0] + if success_num == 0: + return 0 + if self._targeted: + return np.mean(self._adv_preds[idxes, self._target_label[idxes]]) + return np.mean(self._adv_preds[idxes, self._adv_label[idxes]]) + + def avg_conf_true_class(self): + """ + Calculate average confidence of true class (ACTC). + + Returns: + float, ranges between (0, 1). The lower, the more successful the attack is. + """ + idxes = self._success_idxes + success_num = idxes.shape[0] + if success_num == 0: + return 0 + return np.mean(self._adv_preds[idxes, self._true_label[idxes]]) + + def avg_lp_distance(self): + """ + Calculate average lp distance (lp-dist). + + Returns: + - float, return average l0, l2, or linf distance of all success + adversarial examples, return value includes following cases. + + - If return value :math:`>=` 0, average lp distance. The lower, + the more successful the attack is. + + - If return value is -1, there is no success adversarial examples. + """ + idxes = self._success_idxes + success_num = idxes.shape[0] + if success_num == 0: + return -1, -1, -1 + l0_dist = 0 + l2_dist = 0 + linf_dist = 0 + avoid_zero_div = 1e-14 + for i in idxes: + diff = (self._adv_inputs[i] - self._inputs[i]).flatten() + data = self._inputs[i].flatten() + l0_dist += np.linalg.norm(diff, ord=0) \ + / (np.linalg.norm(data, ord=0) + avoid_zero_div) + l2_dist += np.linalg.norm(diff, ord=2) \ + / (np.linalg.norm(data, ord=2) + avoid_zero_div) + linf_dist += np.linalg.norm(diff, ord=np.inf) \ + / (np.linalg.norm(data, ord=np.inf) + avoid_zero_div) + + return l0_dist / success_num, l2_dist / success_num, \ + linf_dist / success_num + + def avg_ssim(self): + """ + Calculate average structural similarity (ASS). + + Returns: + - float, average structural similarity. + + - If return value ranges between (0, 1), the higher, the more + successful the attack is. + + - If return value is -1: there is no success adversarial examples. + """ + success_num = self._success_idxes.shape[0] + if success_num == 0: + return -1 + + total_ssim = 0.0 + for _, i in enumerate(self._success_idxes): + total_ssim += _compute_ssim(self._adv_inputs[i], self._inputs[i]) + + return total_ssim / success_num + + def nte(self): + """ + Calculate noise tolerance estimation (NTE). + + References: `Towards Imperceptible and Robust Adversarial Example Attacks + against Neural Networks `_ + + + Returns: + float, ranges between (0, 1). The higher, the more successful the + attack is. 
+ """ + idxes = self._success_idxes + success_num = idxes.shape[0] + adv_y = self._adv_preds[idxes] + adv_y_2 = np.copy(adv_y) + adv_y_2[range(success_num), np.argmax(adv_y_2, axis=1)] = 0 + net = np.mean(np.abs(np.max(adv_y_2, axis=1) - np.max(adv_y, axis=1))) + + return net diff --git a/mindarmour/evaluations/black/__init__.py b/mindarmour/evaluations/black/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mindarmour/evaluations/black/defense_evaluation.py b/mindarmour/evaluations/black/defense_evaluation.py new file mode 100644 index 0000000..8f923e3 --- /dev/null +++ b/mindarmour/evaluations/black/defense_evaluation.py @@ -0,0 +1,204 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Evaluating Defense against Black-box Attacks. +""" +import numpy as np + +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_pair_numpy_param, \ + check_equal_length, check_int_positive, check_numpy_param + +LOGGER = LogUtil.get_instance() +TAG = 'BlackDefenseEvaluate' + + +class BlackDefenseEvaluate: + """ + Evaluation metrics of anti-black-box defense method. + + Args: + raw_preds (numpy.ndarray): Predict results of some certain samples on + raw model. + def_preds (numpy.ndarray): Predict results of some certain samples on + defensed model. + raw_query_counts (numpy.ndarray): Number of queries to generate + adversarial examples on raw model, which is one dimensional whose + size is raw_preds.shape[0]. For benign samples, query count must be + set to 0. + def_query_counts (numpy.ndarray): Number of queries to generate + adversarial examples on defensed model, which is one dimensional + whose size is raw_preds.shape[0]. + For benign samples, query count must be set to 0. + raw_query_time (numpy.ndarray): The total time duration to generate + an adversarial example on raw model, which is one dimensional + whose size is raw_preds.shape[0]. + def_query_time (numpy.ndarray): The total time duration to generate an + adversarial example on defensed model, which is one dimensional + whose size is raw_preds.shape[0]. + def_detection_counts (numpy.ndarray): Total number of detected queries + during each adversarial example generation, which is one dimensional + whose size is raw_preds.shape[0]. For a benign sample, the + def_detection_counts is set to 1 if the query is identified as + suspicious, and 0 otherwise. + true_labels (numpy.ndarray): True labels in one-dim whose size is + raw_preds.shape[0]. + max_queries (int): Attack budget, the maximum number of queries. 
+ + Examples: + >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], + >>> [0.1, 0.7, 0.0, 0.2], + >>> [0.8, 0.1, 0.0, 0.1]]) + >>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7], + >>> [0.1, 0.6, 0.2, 0.1], + >>> [0.1, 0.2, 0.1, 0.6]]) + >>> raw_query_counts = np.array([0,20,10]) + >>> def_query_counts = np.array([0,50,60]) + >>> raw_query_time = np.array([0.1, 2, 1]) + >>> def_query_time = np.array([0.2, 6, 5]) + >>> def_detection_counts = np.array([1, 5, 10]) + >>> true_labels = np.array([3, 1, 0]) + >>> max_queries = 100 + >>> def_eval = BlackDefenseEvaluat(raw_preds, + >>> def_preds, + >>> raw_query_counts, + >>> def_query_counts, + >>> raw_query_time, + >>> def_query_time, + >>> def_detection_counts, + >>> true_labels, + >>> max_queries) + >>> def_eval.qcv() + """ + + def __init__(self, raw_preds, def_preds, raw_query_counts, def_query_counts, + raw_query_time, def_query_time, def_detection_counts, + true_labels, max_queries): + self._raw_preds, self._def_preds = check_pair_numpy_param('raw_preds', + raw_preds, + 'def_preds', + def_preds) + self._num_samples = self._raw_preds.shape[0] + self._raw_query_counts, _ = check_equal_length('raw_query_counts', + raw_query_counts, + 'number of sample', + self._raw_preds) + self._def_query_counts, _ = check_equal_length('def_query_counts', + def_query_counts, + 'number of sample', + self._raw_preds) + self._raw_query_time, _ = check_equal_length('raw_query_time', + raw_query_time, + 'number of sample', + self._raw_preds) + self._def_query_time, _ = check_equal_length('def_query_time', + def_query_time, + 'number of sample', + self._raw_preds) + + self._num_adv_samples = self._raw_query_counts[ + self._raw_query_counts > 0].shape[0] + + self._num_adv_samples = check_int_positive( + 'the number of adversarial samples', + self._num_adv_samples) + + self._num_ben_samples = self._num_samples - self._num_adv_samples + self._max_queries = check_int_positive('max_queries', max_queries) + + self._def_detection_counts = check_numpy_param('def_detection_counts', + def_detection_counts) + self._true_labels = check_numpy_param('true_labels', true_labels) + + def qcv(self): + """ + Calculate query count variance (QCV). + + Returns: + float, the higher, the stronger the defense is. If num_adv_samples=0, + return -1. + """ + if self._num_adv_samples == 0: + return -1 + avg_def_query_count = \ + np.sum(self._def_query_counts) / self._num_adv_samples + avg_raw_query_count = \ + np.sum(self._raw_query_counts) / self._num_adv_samples + + if (avg_def_query_count == self._max_queries) \ + and (avg_raw_query_count < self._max_queries): + query_variance = 1 + else: + query_variance = \ + min(avg_def_query_count - avg_raw_query_count, + self._max_queries) / self._max_queries + return query_variance + + def asv(self): + """ + Calculate attack success rate variance (ASV). + + Returns: + float, the lower, the stronger the defense is. If num_adv_samples=0, + return -1. + """ + adv_def_preds = self._def_preds[self._def_query_counts > 0] + adv_raw_preds = self._raw_preds[self._raw_query_counts > 0] + adv_true_labels = self._true_labels[self._raw_query_counts > 0] + + def_succ_num = np.sum(np.argmax(adv_def_preds, axis=1) + != adv_true_labels) + raw_succ_num = np.sum(np.argmax(adv_raw_preds, axis=1) + != adv_true_labels) + if self._num_adv_samples == 0: + return -1 + return (raw_succ_num - def_succ_num) / self._num_adv_samples + + def fpr(self): + """ + Calculate false positive rate (FPR) of the query-based detector. 
+ + Returns: + float, the lower, the higher usability the defense is. If + num_adv_samples=0, return -1. + """ + + ben_detect_counts = \ + self._def_detection_counts[self._def_query_counts == 0] + num_fp = ben_detect_counts[ben_detect_counts > 0].shape[0] + if self._num_ben_samples == 0: + return -1 + return num_fp / self._num_ben_samples + + def qrv(self): + """ + Calculate the benign query response time variance (QRV). + + Returns: + float, the lower, the higher usability the defense is. If + num_adv_samples=0, return -1. + """ + if self._num_ben_samples == 0: + return -1 + raw_num_queries = self._num_ben_samples + def_num_queries = self._num_ben_samples + + ben_raw_query_time = self._raw_query_time[self._raw_query_counts == 0] + ben_def_query_time = self._def_query_time[self._def_query_counts == 0] + + avg_raw_query_time = np.sum(ben_raw_query_time) / raw_num_queries + avg_def_query_time = np.sum(ben_def_query_time) / def_num_queries + + return (avg_def_query_time - + avg_raw_query_time) / (avg_raw_query_time + 1e-12) diff --git a/mindarmour/evaluations/defense_evaluation.py b/mindarmour/evaluations/defense_evaluation.py new file mode 100644 index 0000000..06f738f --- /dev/null +++ b/mindarmour/evaluations/defense_evaluation.py @@ -0,0 +1,152 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Defense Evaluation. +""" +import numpy as np + +import scipy.stats as st + +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_numpy_param +from mindarmour.utils._check_param import check_pair_numpy_param + +LOGGER = LogUtil.get_instance() +TAG = 'DefenseEvaluate' + + +class DefenseEvaluate: + """ + Evaluation metrics of defense methods. + + Args: + raw_preds (numpy.ndarray): Prediction results of some certain samples + on raw model. + def_preds (numpy.ndarray): Prediction results of some certain samples on + defensed model. + true_labels (numpy.ndarray): Ground-truth labels of samples, a + one-dimension array whose size is raw_preds.shape[0]. + + Examples: + >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], + >>> [0.1, 0.7, 0.0, 0.2], + >>> [0.8, 0.1, 0.0, 0.1]]) + >>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7], + >>> [0.1, 0.6, 0.2, 0.1], + >>> [0.1, 0.2, 0.1, 0.6]]) + >>> true_labels = np.array([3, 1, 0]) + >>> def_eval = DefenseEvaluate(raw_preds, + >>> def_preds, + >>> true_labels) + >>> def_eval.cav() + """ + def __init__(self, raw_preds, def_preds, true_labels): + self._raw_preds, self._def_preds = check_pair_numpy_param('raw_preds', + raw_preds, + 'def_preds', + def_preds) + self._true_labels = check_numpy_param('true_labels', true_labels) + self._num_samples = len(true_labels) + + def cav(self): + """ + Calculate classification accuracy variance (CAV). + + Returns: + float, the higher, the more successful the defense is. 
+ """ + def_succ_num = np.sum(np.argmax(self._def_preds, axis=1) + == self._true_labels) + raw_succ_num = np.sum(np.argmax(self._raw_preds, axis=1) + == self._true_labels) + + return (def_succ_num - raw_succ_num) / self._num_samples + + def crr(self): + """ + Calculate classification rectify ratio (CRR). + + Returns: + float, the higher, the more successful the defense is. + """ + cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels + cond2 = np.argmax(self._raw_preds, axis=1) != self._true_labels + rectify_num = np.sum(cond1*cond2) + + return rectify_num*1.0 / self._num_samples + + def csr(self): + """ + Calculate classification sacrifice ratio (CSR), the lower the better. + + Returns: + float, the lower, the more successful the defense is. + """ + cond1 = np.argmax(self._def_preds, axis=1) != self._true_labels + cond2 = np.argmax(self._raw_preds, axis=1) == self._true_labels + sacrifice_num = np.sum(cond1*cond2) + + return sacrifice_num*1.0 / self._num_samples + + def ccv(self): + """ + Calculate classification confidence variance (CCV). + + Returns: + - float, the lower, the more successful the defense is. + + - If return value == -1, len(idxes) == 0. + """ + idxes = np.arange(self._num_samples) + cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels + cond2 = np.argmax(self._raw_preds, axis=1) == self._true_labels + idxes = idxes[cond1*cond2] + + def_max = np.max(self._def_preds, axis=1) + raw_max = np.max(self._raw_preds, axis=1) + + if idxes.shape[0] == 0: + return -1 + conf_variance = np.mean(np.abs(def_max[idxes] - raw_max[idxes])) + + return conf_variance + + def cos(self): + """ + References: `Calculate classification output stability (COS) + `_ + + Returns: + float. + - If return value >= 0, is effective defense. The lower, the + more successful the defense. + + - If return value == -1, idxes == 0. + """ + idxes = np.arange(self._num_samples) + cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels + cond2 = np.argmax(self._raw_preds, axis=1) == self._true_labels + idxes = idxes[cond1*cond2] + if idxes.size == 0: + return -1 + def_preds = self._def_preds[idxes] + raw_preds = self._raw_preds[idxes] + + js_total = 0.0 + mean_value = 0.5*(def_preds + raw_preds) + for i, value in enumerate(mean_value): + js_total += 0.5*st.entropy(def_preds[i], value) \ + + 0.5*st.entropy(raw_preds[i], value) + + return js_total / len(idxes) diff --git a/mindarmour/evaluations/visual_metrics.py b/mindarmour/evaluations/visual_metrics.py new file mode 100644 index 0000000..8f5e02e --- /dev/null +++ b/mindarmour/evaluations/visual_metrics.py @@ -0,0 +1,141 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Radar map. 
+""" +from math import pi + +import numpy as np + +import matplotlib.pyplot as plt + +from mindarmour.utils.logger import LogUtil +from mindarmour.utils._check_param import check_param_type, check_numpy_param, \ + check_param_multi_types, check_equal_length + +LOGGER = LogUtil.get_instance() +TAG = 'RadarMetric' + + +class RadarMetric: + """ + Radar chart to show the robustness of a model by multiple metrics. + + Args: + metrics_name (Union[tuple, list]): An array of names of metrics to show. + metrics_data (numpy.ndarray): The (normalized) values of each metrics of + multiple radar curves, like [[0.5, 0.8, ...], [0.2,0.6,...], ...]. + Each set of values corresponds to one radar curve. + labels (Union[tuple, list]): Legends of all radar curves. + title (str): Title of the chart. + scale (str): Scalar to adjust axis ticks, such as 'hide', 'norm', + 'sparse' or 'dense'. Default: 'hide'. + + Raises: + ValueError: If scale not in ['hide', 'norm', 'sparse', 'dense']. + + Examples: + >>> metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC'] + >>> def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8] + >>> raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7] + >>> metrics_data = [def_metrics, raw_metrics] + >>> metrics_labels = ['before', 'after'] + >>> rm = RadarMetric(metrics_name, + >>> metrics_data, + >>> metrics_labels, + >>> title='', + >>> scale='sparse') + >>> rm.show() + """ + + def __init__(self, metrics_name, metrics_data, labels, title, scale='hide'): + + self._metrics_name = check_param_multi_types('metrics_name', + metrics_name, + [tuple, list]) + self._metrics_data = check_numpy_param('metrics_data', metrics_data) + self._labels = check_param_multi_types('labels', labels, (tuple, list)) + + _, _ = check_equal_length('metrics_name', metrics_name, + 'metrics_data', self._metrics_data[0]) + _, _ = check_equal_length('labels', labels, 'metrics_data', metrics_data) + self._title = check_param_type('title', title, str) + if scale in ['hide', 'norm', 'sparse', 'dense']: + self._scale = scale + else: + msg = "scale must be in ['hide', 'norm', 'sparse', 'dense'], but " \ + "got {}".format(scale) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + self._nb_var = len(metrics_name) + # divide the plot / number of variable + self._angles = [n / self._nb_var*2.0*pi for n in + range(self._nb_var)] + self._angles += self._angles[:1] + + # add one more point + data = [self._metrics_data, self._metrics_data[:, [0]]] + self._metrics_data = np.concatenate(data, axis=1) + + def show(self): + """ + Show the radar chart. 
+ """ + # Initialise the spider plot + plt.clf() + axis_pic = plt.subplot(111, polar=True) + axis_pic.spines['polar'].set_visible(False) + axis_pic.set_yticklabels([]) + + # If you want the first axis to be on top: + axis_pic.set_theta_offset(pi / 2) + axis_pic.set_theta_direction(-1) + + # Draw one axe per variable + add labels labels yet + plt.xticks(self._angles[:-1], self._metrics_name) + + # Draw y labels + axis_pic.set_rlabel_position(0) + if self._scale == 'hide': + plt.yticks([0.0], color="grey", size=7) + elif self._scale == 'norm': + plt.yticks([0.2, 0.4, 0.6, 0.8], + ["0.2", "0.4", "0.6", "0.8"], + color="grey", size=7) + elif self._scale == 'sparse': + plt.yticks([0.5], ["0.5"], color="grey", size=7) + elif self._scale == 'dense': + ticks = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + labels = ["0.1", "0.2", "0.3", "0.4", "0.5", "0.6", + "0.7", "0.8", "0.9"] + plt.yticks(ticks, labels, color="grey", size=7) + else: + # default + plt.yticks([0.0], color="grey", size=7) + plt.ylim(0, 1) + + # plot border + axis_pic.plot(self._angles, [1]*(self._nb_var + 1), color='grey', + linewidth=1, linestyle='solid') + + for i in range(len(self._labels)): + axis_pic.plot(self._angles, self._metrics_data[i], linewidth=1, + linestyle='solid', label=self._labels[i]) + axis_pic.fill(self._angles, self._metrics_data[i], alpha=0.1) + + # Add legend + plt.legend(loc='upper right', bbox_to_anchor=(0., 0.)) + plt.title(self._title, y=1.1, color='g') + plt.show() diff --git a/mindarmour/utils/__init__.py b/mindarmour/utils/__init__.py new file mode 100644 index 0000000..0a7c1c5 --- /dev/null +++ b/mindarmour/utils/__init__.py @@ -0,0 +1,7 @@ +""" +Util methods of MindArmour.""" +from .logger import LogUtil +from .util import GradWrap +from .util import GradWrapWithLoss + +__all__ = ['LogUtil', 'GradWrapWithLoss', 'GradWrap'] diff --git a/mindarmour/utils/_check_param.py b/mindarmour/utils/_check_param.py new file mode 100644 index 0000000..3ac0eba --- /dev/null +++ b/mindarmour/utils/_check_param.py @@ -0,0 +1,269 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" check parameters for MindArmour. 
""" +import numpy as np + +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'check parameters' + + +def _check_array_not_empty(arg_name, arg_value): + """Check parameter is empty or not.""" + if isinstance(arg_value, (tuple, list)): + if not arg_value: + msg = '{} must not be empty'.format(arg_name) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + if isinstance(arg_value, np.ndarray): + if arg_value.size <= 0: + msg = '{} must not be empty'.format(arg_name) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return arg_value + + +def check_param_type(arg_name, arg_value, valid_type): + """Check parameter type.""" + if not isinstance(arg_value, valid_type): + msg = '{} must be {}, but got {}'.format(arg_name, + valid_type, + type(arg_value).__name__) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + return arg_value + + +def check_param_multi_types(arg_name, arg_value, valid_types): + """Check parameter type.""" + if not isinstance(arg_value, tuple(valid_types)): + msg = 'type of {} must be in {}, but got {}' \ + .format(arg_name, valid_types, type(arg_value).__name__) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + return arg_value + + +def check_int_positive(arg_name, arg_value): + """Check positive integer.""" + arg_value = check_param_type(arg_name, arg_value, int) + if arg_value <= 0: + msg = '{} must be greater than 0, but got {}'.format(arg_name, + arg_value) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return arg_value + + +def check_value_non_negative(arg_name, arg_value): + """Check non negative value.""" + arg_value = check_param_multi_types(arg_name, arg_value, (int, float)) + if float(arg_value) < 0.0: + msg = '{} must not be less than 0, but got {}'.format(arg_name, + arg_value) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return arg_value + + +def check_value_positive(arg_name, arg_value): + """Check positive value.""" + arg_value = check_param_multi_types(arg_name, arg_value, (int, float)) + if float(arg_value) <= 0.0: + msg = '{} must be greater than zero, but got {}'.format(arg_name, + arg_value) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return arg_value + + +def check_param_in_range(arg_name, arg_value, lower, upper): + """ + Check range of parameter. + """ + if arg_value <= lower or arg_value >= upper: + msg = '{} must be between {} and {}, but got {}'.format(arg_name, + lower, + upper, + arg_value) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + return arg_value + + +def check_model(model_name, model, model_type): + """ + Check the type of input `model` . + + Args: + model_name (str): Name of model. + model (Object): Model object. + model_type (Class): Class of model. + + Returns: + Object, if the type of `model` is `model_type`, return `model` itself. + + Raises: + ValueError: If model is not an instance of `model_type` . + """ + if isinstance(model, model_type): + return model + msg = '{} should be an instance of {}, but got {}' \ + .format(model_name, + model_type, + type(model).__name__) + LOGGER.error(TAG, msg) + raise ValueError(msg) + + +def check_numpy_param(arg_name, arg_value): + """ + None-check and Numpy-check for `value` . + + Args: + arg_name (str): Name of parameter. + arg_value (Union[list, tuple, numpy.ndarray]): Value for check. + + Returns: + numpy.ndarray, if `value` is not empty, return `value` with type of + numpy.ndarray. + + Raises: + ValueError: If value is empty. + ValueError: If value type is not in (list, tuple, numpy.ndarray). 
+ """ + _ = _check_array_not_empty(arg_name, arg_value) + if isinstance(arg_value, (list, tuple)): + arg_value = np.asarray(arg_value) + elif isinstance(arg_value, np.ndarray): + arg_value = np.copy(arg_value) + else: + msg = 'type of {} must be in (list, tuple, numpy.ndarray)'.format( + arg_name) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return arg_value + + +def check_pair_numpy_param(inputs_name, inputs, labels_name, labels): + """ + Dimension-equivalence check for `inputs` and `labels`. + + Args: + inputs_name (str): Name of inputs. + inputs (Union[list, tuple, numpy.ndarray]): Inputs. + labels_name (str): Name of labels. + labels (Union[list, tuple, numpy.ndarray]): Labels of `inputs`. + + Returns: + - Union[list, tuple, numpy.ndarray], if `inputs` 's dimension equals to + `labels`, return inputs with type of numpy.ndarray. + + - Union[list, tuple, numpy.ndarray], if `inputs` 's dimension equals to + `labels` , return labels with type of numpy.ndarray. + + Raises: + ValueError: If inputs.shape[0] is not equal to labels.shape[0]. + """ + inputs = check_numpy_param(inputs_name, inputs) + labels = check_numpy_param(labels_name, labels) + if inputs.shape[0] != labels.shape[0]: + msg = '{} shape[0] must equal {} shape[0], bot got shape of ' \ + 'inputs {}, shape of labels {}'.format(inputs_name, labels_name, + inputs.shape, labels.shape) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return inputs, labels + + +def check_equal_length(para_name1, value1, para_name2, value2): + """check weather the two parameters have equal length.""" + if len(value1) != len(value2): + msg = 'The dimension of {0} must equal to the ' \ + '{1}, but got {0} is {2}, ' \ + '{1} is {3}'.format(para_name1, para_name2, len(value1), + len(value2)) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return value1, value2 + + +def check_equal_shape(para_name1, value1, para_name2, value2): + """check weather the two parameters have equal shape.""" + if value1.shape != value2.shape: + msg = 'The shape of {0} must equal to the ' \ + '{1}, but got {0} is {2}, ' \ + '{1} is {3}'.format(para_name1, para_name2, value1.shape[0], + value2.shape[0]) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return value1, value2 + + +def check_norm_level(norm_level): + """ + check norm_level of regularization. + """ + accept_norm = [1, 2, '1', '2', 'l1', 'l2', 'inf', 'linf', np.inf] + if norm_level not in accept_norm: + msg = 'norm_level must be in {}, but got {}'.format(accept_norm, + norm_level) + LOGGER.error(TAG, msg) + raise ValueError(msg) + return norm_level + + +def normalize_value(value, norm_level): + """ + Normalize gradients for gradient attacks. + + Args: + value (numpy.ndarray): Inputs. + norm_level (Union[int, str]): Normalized level. + + Returns: + numpy.ndarray, normalized value. 
+ + Raises: + NotImplementedError: If norm_level is not in [1, 2 , np.inf, '1', '2', + 'inf] + """ + norm_level = check_norm_level(norm_level) + ori_shape = value.shape + value_reshape = value.reshape((value.shape[0], -1)) + avoid_zero_div = 1e-12 + if norm_level in (1, '1', 'l1'): + norm = np.linalg.norm(value_reshape, ord=1, axis=1, keepdims=True) + \ + avoid_zero_div + norm_value = value_reshape / norm + elif norm_level in (2, '2', 'l2'): + norm = np.linalg.norm(value_reshape, ord=2, axis=1, keepdims=True) + \ + avoid_zero_div + norm_value = value_reshape / norm + elif norm_level in (np.inf, 'inf'): + norm = np.max(abs(value_reshape), axis=1, keepdims=True) + \ + avoid_zero_div + norm_value = value_reshape / norm + else: + msg = 'Values of `norm_level` different from 1, 2 and ' \ + '`np.inf` are currently not supported, but got {}.' \ + .format(norm_level) + LOGGER.error(TAG, msg) + raise NotImplementedError(msg) + return norm_value.reshape(ori_shape) diff --git a/mindarmour/utils/logger.py b/mindarmour/utils/logger.py new file mode 100644 index 0000000..432def9 --- /dev/null +++ b/mindarmour/utils/logger.py @@ -0,0 +1,154 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Util for log module. """ +import logging + +_LOGGER = logging.getLogger('MA') + + +def _find_caller(): + """ + Bind findCaller() method, which is used to find the stack frame of the + caller so that we can note the source file name, line number and + function name. + """ + return _LOGGER.findCaller() + + +class LogUtil: + """ + Logging module. + + Raises: + SyntaxError: If create this class. + """ + _instance = None + _logger = None + _extra_fmt = ' [%s] [%s] ' + + def __init__(self): + raise SyntaxError('can not instance, please use get_instance.') + + @staticmethod + def get_instance(): + """ + Get instance of class `LogUtil`. + + Returns: + Object, instance of class `LogUtil`. + """ + if LogUtil._instance is None: + LogUtil._instance = object.__new__(LogUtil) + LogUtil._logger = _LOGGER + LogUtil._init_logger() + return LogUtil._instance + + @staticmethod + def _init_logger(): + """ + Initialize logger. + """ + LogUtil._logger.setLevel(logging.WARNING) + + log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \ + '%(processName)s):%(asctime)s%(message)s' + log_fmt = logging.Formatter(log_fmt) + + # create console handler with a higher log level + console_handler = logging.StreamHandler() + console_handler.setFormatter(log_fmt) + + # add the handlers to the logger + LogUtil._logger.handlers = [] + LogUtil._logger.addHandler(console_handler) + + LogUtil._logger.propagate = False + + def set_level(self, level): + """ + Set the logging level of this logger, level must be an integer or a + string. + + Args: + level (Union[int, str]): Level of logger. + """ + self._logger.setLevel(level) + + def add_handler(self, handler): + """ + Add other handler supported by logging module. + + Args: + handler (logging.Handler): Other handler supported by logging module. 
+ + Raises: + ValueError: If handler is not an instance of logging.Handler. + """ + if isinstance(handler, logging.Handler): + self._logger.addHandler(handler) + else: + raise ValueError('handler must be an instance of logging.Handler,' + ' but got {}'.format(type(handler))) + + def debug(self, tag, msg, *args): + """ + Log '[tag] msg % args' with severity 'DEBUG'. + + Args: + tag (str): Logger tag. + msg (str): Logger message. + args (Any): Auxiliary value. + """ + caller_info = _find_caller() + file_info = ':'.join([caller_info[0], str(caller_info[1])]) + self._logger.debug(self._extra_fmt + msg, file_info, tag, *args) + + def info(self, tag, msg, *args): + """ + Log '[tag] msg % args' with severity 'INFO'. + + Args: + tag (str): Logger tag. + msg (str): Logger message. + args (Any): Auxiliary value. + """ + caller_info = _find_caller() + file_info = ':'.join([caller_info[0], str(caller_info[1])]) + self._logger.info(self._extra_fmt + msg, file_info, tag, *args) + + def warn(self, tag, msg, *args): + """ + Log '[tag] msg % args' with severity 'WARNING'. + + Args: + tag (str): Logger tag. + msg (str): Logger message. + args (Any): Auxiliary value. + """ + caller_info = _find_caller() + file_info = ':'.join([caller_info[0], str(caller_info[1])]) + self._logger.warning(self._extra_fmt + msg, file_info, tag, *args) + + def error(self, tag, msg, *args): + """ + Log '[tag] msg % args' with severity 'ERROR'. + + Args: + tag (str): Logger tag. + msg (str): Logger message. + args (Any): Auxiliary value. + """ + caller_info = _find_caller() + file_info = ':'.join([caller_info[0], str(caller_info[1])]) + self._logger.error(self._extra_fmt + msg, file_info, tag, *args) diff --git a/mindarmour/utils/util.py b/mindarmour/utils/util.py new file mode 100644 index 0000000..094177b --- /dev/null +++ b/mindarmour/utils/util.py @@ -0,0 +1,147 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Util for MindArmour. """ +import numpy as np + +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore.ops.composite import GradOperation + +from mindarmour.utils.logger import LogUtil + +LOGGER = LogUtil.get_instance() +TAG = 'util' + + +def jacobian_matrix(grad_wrap_net, inputs, num_classes): + """ + Calculate the Jacobian matrix for inputs. + + Args: + grad_wrap_net (Cell): A network wrapped by GradWrap. + inputs (numpy.ndarray): Input samples. + num_classes (int): Number of labels of model output. + + Returns: + numpy.ndarray, the Jacobian matrix of inputs. (labels, batch_size, ...) + + Raises: + ValueError: If grad_wrap_net is not a instance of class `GradWrap`. + """ + if not isinstance(grad_wrap_net, GradWrap): + msg = 'grad_wrap_net be and instance of class `GradWrap`.' 
+ LOGGER.error(TAG, msg) + raise ValueError(msg) + grad_wrap_net.set_train() + grads_matrix = [] + for idx in range(num_classes): + sens = np.zeros((inputs.shape[0], num_classes)).astype(np.float32) + sens[:, idx] = 1.0 + grads = grad_wrap_net(Tensor(inputs), Tensor(sens)) + grads_matrix.append(grads.asnumpy()) + return np.asarray(grads_matrix) + + +class WithLossCell(Cell): + """ + Wrap the network with loss function. + + Args: + network (Cell): The target network to wrap. + loss_fn (Function): The loss function is used for computing loss. + + Examples: + >>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01) + >>> label = Tensor(np.ones([1, 10]).astype(np.float32)) + >>> net = NET() + >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits() + >>> loss_net = WithLossCell(net, loss_fn) + >>> loss_out = loss_net(data, label) + """ + def __init__(self, network, loss_fn): + super(WithLossCell, self).__init__() + self._network = network + self._loss_fn = loss_fn + + def construct(self, data, label): + """ + Compute loss based on the wrapped loss cell. + + Args: + data (Tensor): Tensor data to train. + label (Tensor): Tensor label data. + + Returns: + Tensor, compute result. + """ + out = self._network(data) + return self._loss_fn(out, label) + + +class GradWrapWithLoss(Cell): + """ + Construct a network to compute the gradient of loss function in input space + and weighted by `weight`. + """ + + def __init__(self, network): + super(GradWrapWithLoss, self).__init__() + self._grad_all = GradOperation(name="get_all", + get_all=True, + sens_param=True) + self._network = network + + def construct(self, inputs, labels, weight): + """ + Compute gradient of `inputs` with labels and weight. + + Args: + inputs (Tensor): Inputs of network. + labels (Tensor): Labels of inputs. + weight (Tensor): Weight of each gradient, `weight` has the same + shape with labels. + + Returns: + Tensor, gradient matrix. + """ + gout = self._grad_all(self._network)(inputs, labels, weight) + return gout[0] + + +class GradWrap(Cell): + """ + Construct a network to compute the gradient of network outputs in input + space and weighted by `weight`, expressed as a jacobian matrix. + """ + + def __init__(self, network): + super(GradWrap, self).__init__() + self.grad = GradOperation(name="grad", get_all=False, + sens_param=True) + self.network = network + + def construct(self, inputs, weight): + """ + Compute jacobian matrix. + + Args: + inputs (Tensor): Inputs of network. + weight (Tensor): Weight of each gradient, `weight` has the same + shape with labels. + + Returns: + Tensor, Jacobian matrix. + """ + gout = self.grad(self.network)(inputs, weight) + return gout diff --git a/package.sh b/package.sh new file mode 100644 index 0000000..d840869 --- /dev/null +++ b/package.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e + +BASEPATH=$(cd "$(dirname $0)"; pwd) +OUTPUT_PATH="${BASEPATH}/output" +PYTHON=$(which python3) + +mk_new_dir() { + local create_dir="$1" # the target to make + + if [[ -d "${create_dir}" ]];then + rm -rf "${create_dir}" + fi + + mkdir -pv "${create_dir}" +} + +mk_new_dir "${OUTPUT_PATH}" + +${PYTHON} ${BASEPATH}/setup.py bdist_wheel + +mv ${BASEPATH}/dist/*whl ${OUTPUT_PATH} + +echo "------Successfully created mindarmour package------" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..7862fb0 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +numpy >= 1.17.0 +scipy >= 1.3.3 +matplotlib >= 3.1.3 +pytest >= 4.3.1 +wheel >= 0.32.0 +setuptools >= 40.8.0 +mindspore diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..e5caab3 --- /dev/null +++ b/setup.py @@ -0,0 +1,102 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import stat +from setuptools import find_packages +from setuptools import setup +from setuptools.command.egg_info import egg_info +from setuptools.command.build_py import build_py + +version = '0.1.0' +cur_dir = os.path.dirname(os.path.realpath(__file__)) +pkg_dir = os.path.join(cur_dir, 'build') + +try: + from wheel.bdist_wheel import bdist_wheel as _bdist_wheel + + + class bdist_wheel(_bdist_wheel): + def finalize_options(self): + _bdist_wheel.finalize_options(self) + self.root_is_pure = False +except ImportError: + bdist_wheel = None + + +def write_version(file): + file.write("__version__ = '{}'\n".format(version)) + + +def build_depends(): + """generate python file""" + version_file = os.path.join(cur_dir, 'mindarmour/', 'version.py') + with open(version_file, 'w') as f: + write_version(f) + + +build_depends() + + +def update_permissions(path): + """ + Update permissions. + + Args: + path (str): Target directory path. 
+ """ + for dirpath, dirnames, filenames in os.walk(path): + for dirname in dirnames: + dir_fullpath = os.path.join(dirpath, dirname) + os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP) + for filename in filenames: + file_fullpath = os.path.join(dirpath, filename) + os.chmod(file_fullpath, stat.S_IREAD) + + +class EggInfo(egg_info): + """Egg info.""" + def run(self): + super().run() + egg_info_dir = os.path.join(cur_dir, 'mindarmour.egg-info') + update_permissions(egg_info_dir) + + +class BuildPy(build_py): + """BuildPy.""" + def run(self): + super().run() + mindarmour_dir = os.path.join(pkg_dir, 'lib', 'mindarmour') + update_permissions(mindarmour_dir) + + +setup( + name='mindarmour', + version='0.1.0', + description="A smart AI security and trustworthy tool box.", + packages=find_packages(), + include_package_data=True, + zip_safe=False, + cmdclass={ + 'egg_info': EggInfo, + 'build_py': BuildPy, + 'bdist_wheel': bdist_wheel + }, + install_requires=[ + 'scipy >= 1.3.3', + 'numpy >= 1.17.0', + 'matplotlib >= 3.1.3', + 'mindspore' + ], +) +print(find_packages()) diff --git a/tests/st/resnet50/resnet_cifar10.py b/tests/st/resnet50/resnet_cifar10.py new file mode 100644 index 0000000..080cfc1 --- /dev/null +++ b/tests/st/resnet50/resnet_cifar10.py @@ -0,0 +1,311 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import numpy as np +import math + +from mindspore import nn +from mindspore.ops import operations as P +from mindspore.common.tensor import Tensor +from mindspore import context + + +def variance_scaling_raw(shape): + value = np.random.normal(size=shape).astype(np.float32) + return Tensor(value) + + +def weight_variable(shape): + value = np.random.normal(size=shape).astype(np.float32) + return Tensor(value) + + +def sweight_variable(shape): + value = np.random.uniform(size=shape).astype(np.float32) + return Tensor(value) + + +def weight_variable_0(shape): + zeros = np.zeros(shape).astype(np.float32) + return Tensor(zeros) + + +def weight_variable_1(shape): + ones = np.ones(shape).astype(np.float32) + return Tensor(ones) + + +def conv3x3(in_channels, out_channels, stride=1, padding=0): + """3x3 convolution """ + weight_shape = (out_channels, in_channels, 3, 3) + weight = variance_scaling_raw(weight_shape) + return nn.Conv2d(in_channels, out_channels, + kernel_size=3, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="same") + + +def conv1x1(in_channels, out_channels, stride=1, padding=0): + """1x1 convolution""" + weight_shape = (out_channels, in_channels, 1, 1) + weight = variance_scaling_raw(weight_shape) + return nn.Conv2d(in_channels, out_channels, + kernel_size=1, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="same") + + +def conv7x7(in_channels, out_channels, stride=1, padding=0): + """1x1 convolution""" + weight_shape = (out_channels, in_channels, 7, 7) + weight = variance_scaling_raw(weight_shape) + return nn.Conv2d(in_channels, out_channels, + kernel_size=7, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="same") + + +def bn_with_initialize(out_channels): + shape = (out_channels) + mean = weight_variable_0(shape) + var = weight_variable_1(shape) + beta = weight_variable_0(shape) + gamma = sweight_variable(shape) + bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma, + beta_init=beta, moving_mean_init=mean, moving_var_init=var) + return bn + + +def bn_with_initialize_last(out_channels): + shape = (out_channels) + mean = weight_variable_0(shape) + var = weight_variable_1(shape) + beta = weight_variable_0(shape) + gamma = sweight_variable(shape) + bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma, + beta_init=beta, moving_mean_init=mean, moving_var_init=var) + return bn + + +def fc_with_initialize(input_channels, out_channels): + weight_shape = (out_channels, input_channels) + + weight = np.random.normal(size=weight_shape).astype(np.float32) + weight = Tensor(weight) + + bias_shape = (out_channels) + bias_value = np.random.uniform(size=bias_shape).astype(np.float32) + bias = Tensor(bias_value) + + return nn.Dense(input_channels, out_channels, weight, bias) + + +class ResidualBlock(nn.Cell): + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + stride=1, + down_sample=False): + super(ResidualBlock, self).__init__() + + out_chls = out_channels // self.expansion + self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0) + self.bn1 = bn_with_initialize(out_chls) + + self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0) + self.bn2 = bn_with_initialize(out_chls) + + self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) + self.bn3 = bn_with_initialize_last(out_channels) + + self.relu = P.ReLU() + self.add = P.TensorAdd() + + def construct(self, x): + identity = x + + out = self.conv1(x) + out = 
self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out = self.add(out, identity) + out = self.relu(out) + + return out + + +class ResidualBlockWithDown(nn.Cell): + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + stride=1, + down_sample=False): + super(ResidualBlockWithDown, self).__init__() + + out_chls = out_channels // self.expansion + self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0) + self.bn1 = bn_with_initialize(out_chls) + + self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0) + self.bn2 = bn_with_initialize(out_chls) + + self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) + self.bn3 = bn_with_initialize_last(out_channels) + + self.relu = P.ReLU() + self.downSample = down_sample + + self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) + self.bn_down_sample = bn_with_initialize(out_channels) + self.add = P.TensorAdd() + + def construct(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + identity = self.conv_down_sample(identity) + identity = self.bn_down_sample(identity) + + out = self.add(out, identity) + out = self.relu(out) + + return out + + +class MakeLayer0(nn.Cell): + + def __init__(self, block, layer_num, in_channels, out_channels, stride): + super(MakeLayer0, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True) + self.b = block(out_channels, out_channels, stride=stride) + self.c = block(out_channels, out_channels, stride=1) + + def construct(self, x): + x = self.a(x) + x = self.b(x) + x = self.c(x) + + return x + + +class MakeLayer1(nn.Cell): + + def __init__(self, block, layer_num, in_channels, out_channels, stride): + super(MakeLayer1, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + self.d = block(out_channels, out_channels, stride=1) + + def construct(self, x): + x = self.a(x) + x = self.b(x) + x = self.c(x) + x = self.d(x) + + return x + + +class MakeLayer2(nn.Cell): + + def __init__(self, block, layer_num, in_channels, out_channels, stride): + super(MakeLayer2, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + self.d = block(out_channels, out_channels, stride=1) + self.e = block(out_channels, out_channels, stride=1) + self.f = block(out_channels, out_channels, stride=1) + + def construct(self, x): + x = self.a(x) + x = self.b(x) + x = self.c(x) + x = self.d(x) + x = self.e(x) + x = self.f(x) + + return x + + +class MakeLayer3(nn.Cell): + + def __init__(self, block, layer_num, in_channels, out_channels, stride): + super(MakeLayer3, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + + def construct(self, x): + x = self.a(x) + x = self.b(x) + x = self.c(x) + + return x + + +class ResNet(nn.Cell): + + def __init__(self, block, layer_num, 
num_classes=100): + super(ResNet, self).__init__() + self.num_classes = num_classes + + self.conv1 = conv7x7(3, 64, stride=2, padding=0) + + self.bn1 = bn_with_initialize(64) + self.relu = P.ReLU() + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") + + self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1) + self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2) + self.layer3 = MakeLayer2(block, layer_num[2], in_channels=512, out_channels=1024, stride=2) + self.layer4 = MakeLayer3(block, layer_num[3], in_channels=1024, out_channels=2048, stride=2) + + self.pool = P.ReduceMean(keep_dims=True) + self.squeeze = P.Squeeze(axis=(2, 3)) + self.fc = fc_with_initialize(512*block.expansion, num_classes) + + def construct(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.pool(x, (2, 3)) + x = self.squeeze(x) + x = self.fc(x) + return x + + +def resnet50_cifar10(num_classes): + return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes) diff --git a/tests/st/resnet50/test_cifar10_attack_fgsm.py b/tests/st/resnet50/test_cifar10_attack_fgsm.py new file mode 100644 index 0000000..51e741b --- /dev/null +++ b/tests/st/resnet50/test_cifar10_attack_fgsm.py @@ -0,0 +1,76 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
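A standalone forward pass through the ResNet-50 defined above can be sketched as follows; the input shape and class count mirror the FGSM system test that follows, and an available MindSpore device context (Ascend in that test) is assumed.

    import numpy as np
    from mindspore import Tensor, context
    from resnet_cifar10 import resnet50_cifar10

    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    net = resnet50_cifar10(10)
    images = Tensor(np.random.rand(2, 3, 224, 224).astype(np.float32))
    logits = net(images)   # expected shape: (2, 10)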
+ +""" +Fuction: + Test fgsm attack about resnet50 network +Usage: + py.test test_cifar10_attack_fgsm.py +""" +import os +import numpy as np + +import pytest + +from mindspore import Tensor +from mindspore import context +from mindspore.nn import Cell +from mindspore.common import dtype as mstype +from mindspore.ops import operations as P +from mindspore.ops import functional as F + +from mindarmour.attacks.gradient_method import FastGradientSignMethod + +from resnet_cifar10 import resnet50_cifar10 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + + +class CrossEntropyLoss(Cell): + def __init__(self): + super(CrossEntropyLoss, self).__init__() + self.cross_entropy = P.SoftmaxCrossEntropyWithLogits() + self.mean = P.ReduceMean() + self.one_hot = P.OneHot() + self.on_value = Tensor(1.0, mstype.float32) + self.off_value = Tensor(0.0, mstype.float32) + + def construct(self, logits, label): + label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value) + loss = self.cross_entropy(logits, label)[0] + loss = self.mean(loss, (-1,)) + return loss + + +@pytest.mark.level0 +@pytest.mark.env_single +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_ascend_inference +def test_fast_gradient_sign_method(): + """ + FGSM-Attack test + """ + context.set_context(mode=context.GRAPH_MODE) + # get network + net = resnet50_cifar10(10) + + # create test data + test_images = np.random.rand(64, 3, 224, 224).astype(np.float32) + test_labels = np.random.randint(10, size=64).astype(np.int32) + # attacking + loss_fn = CrossEntropyLoss() + attack = FastGradientSignMethod(net, eps=0.1, loss_fn=loss_fn) + adv_data = attack.batch_generate(test_images, test_labels, batch_size=32) + assert np.any(adv_data != test_images) diff --git a/tests/ut/python/attacks/black/test_genetic_attack.py b/tests/ut/python/attacks/black/test_genetic_attack.py new file mode 100644 index 0000000..8ae7fb7 --- /dev/null +++ b/tests/ut/python/attacks/black/test_genetic_attack.py @@ -0,0 +1,144 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Genetic-Attack test. +""" +import numpy as np +import pytest + +import mindspore.ops.operations as M +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore import context + +from mindarmour.attacks.black.genetic_attack import GeneticAttack +from mindarmour.attacks.black.black_model import BlackModel + + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for user +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +class SimpleNet(Cell): + """ + Construct the network of target model. 
+ + Examples: + >>> net = SimpleNet() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(SimpleNet, self).__init__() + self._softmax = M.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._softmax(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_genetic_attack(): + """ + Genetic_Attack test + """ + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = GeneticAttack(model, pop_size=6, mutation_rate=0.05, + per_bounds=0.1, step_size=0.25, temp=0.1, + sparse=False) + _, adv_data, _ = attack.generate(inputs, labels) + assert np.any(inputs != adv_data) + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_supplement(): + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = GeneticAttack(model, pop_size=6, mutation_rate=0.05, + per_bounds=0.1, step_size=0.25, temp=0.1, + adaptive=True, + sparse=False) + # raise error + _, adv_data, _ = attack.generate(inputs, labels) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + """test that exception is raised for invalid labels""" + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + # labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = GeneticAttack(model, pop_size=6, mutation_rate=0.05, + per_bounds=0.1, step_size=0.25, temp=0.1, + adaptive=True, + sparse=False) + # raise error + with pytest.raises(ValueError) as e: + assert attack.generate(inputs, labels) diff --git a/tests/ut/python/attacks/black/test_hsja.py b/tests/ut/python/attacks/black/test_hsja.py new file mode 100644 index 0000000..c67354a --- /dev/null +++ b/tests/ut/python/attacks/black/test_hsja.py @@ -0,0 +1,166 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
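On label formats in the genetic-attack tests above: with sparse=False the tests pass one-hot labels, and test_value_error shows that raw class indices in that mode raise a ValueError. The sparse=True line in the sketch below is an assumption by analogy with the PointWiseAttack test later in this patch, which passes integer labels with sparse=True.

    import numpy as np
    label_idx = np.random.randint(0, 10, size=6)             # class indices
    label_onehot = np.eye(10)[label_idx].astype(np.float32)  # what the tests pass when sparse=False
    # GeneticAttack(model, ..., sparse=False).generate(inputs, label_onehot)
    # GeneticAttack(model, ..., sparse=True).generate(inputs, label_idx)   # assumed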
+import sys +import os +import numpy as np +import pytest + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.hop_skip_jump_attack import HopSkipJumpAttack +from mindarmour.attacks.black.black_model import BlackModel + +from mindarmour.utils.logger import LogUtil +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../../../../")) +from example.mnist_demo.lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + +LOGGER = LogUtil.get_instance() +TAG = 'HopSkipJumpAttack' + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + if len(inputs.shape) == 3: + inputs = inputs[np.newaxis, :] + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +def random_target_labels(true_labels): + target_labels = [] + for label in true_labels: + while True: + target_label = np.random.randint(0, 10) + if target_label != label: + target_labels.append(target_label) + break + return target_labels + + +def create_target_images(dataset, data_labels, target_labels): + res = [] + for label in target_labels: + for i in range(len(data_labels)): + if data_labels[i] == label: + res.append(dataset[i]) + break + return np.array(res) + +# public variable +def get_model(): + # upload trained network + current_dir = os.path.dirname(os.path.abspath(__file__)) + ckpt_name = os.path.join(current_dir, + '../../test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt') + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + net.set_train(False) + model = ModelToBeAttacked(net) + return model + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_hsja_mnist_attack(): + """ + hsja-Attack test + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + + + # get test data + test_images_set = np.load(os.path.join(current_dir, + '../../test_data/test_images.npy')) + test_labels_set = np.load(os.path.join(current_dir, + '../../test_data/test_labels.npy')) + # prediction accuracy before attack + model = get_model() + batch_num = 1 # the number of batches of attacking samples + predict_labels = [] + i = 0 + + for img in test_images_set: + i += 1 + pred_labels = np.argmax(model.predict(img), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = test_labels_set[:batch_num] + accuracy = np.mean(np.equal(predict_labels, true_labels)) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", + accuracy) + test_images = test_images_set[:batch_num] + + # attacking + norm = 'l2' + search = 'grid_search' + target = False + + attack = HopSkipJumpAttack(model, constraint=norm, stepsize_search=search) + if target: + target_labels = random_target_labels(true_labels) + target_images = create_target_images(test_images_set, test_labels_set, + target_labels) + LOGGER.info(TAG, 'len target labels : %s', len(target_labels)) + LOGGER.info(TAG, 'len target_images : %s', len(target_images)) + LOGGER.info(TAG, 'len test_images : %s', len(test_images)) + attack.set_target_images(target_images) 
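+        # Note: in targeted mode the target images must be registered via
+        # set_target_images before calling generate, and generate then receives
+        # the target labels instead of None.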
+ success_list, adv_data, _ = attack.generate(test_images, target_labels) + else: + success_list, adv_data, query_list = attack.generate(test_images, None) + assert (adv_data != test_images).any() + + adv_datas = [] + gts = [] + for success, adv, gt in zip(success_list, adv_data, true_labels): + if success: + adv_datas.append(adv) + gts.append(gt) + if len(gts) > 0: + adv_datas = np.concatenate(np.asarray(adv_datas), axis=0) + gts = np.asarray(gts) + pred_logits_adv = model.predict(adv_datas) + pred_lables_adv = np.argmax(pred_logits_adv, axis=1) + accuracy_adv = np.mean(np.equal(pred_lables_adv, gts)) + LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', + accuracy_adv) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + model = get_model() + norm = 'l2' + with pytest.raises(ValueError) as e: + assert HopSkipJumpAttack(model, constraint=norm, stepsize_search='bad-search') diff --git a/tests/ut/python/attacks/black/test_nes.py b/tests/ut/python/attacks/black/test_nes.py new file mode 100644 index 0000000..33f0f3d --- /dev/null +++ b/tests/ut/python/attacks/black/test_nes.py @@ -0,0 +1,217 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import numpy as np +import os +import pytest + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.natural_evolutionary_strategy import NES +from mindarmour.attacks.black.black_model import BlackModel + +from mindarmour.utils.logger import LogUtil +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../../../../")) +from example.mnist_demo.lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + +LOGGER = LogUtil.get_instance() +TAG = 'HopSkipJumpAttack' + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + if len(inputs.shape) == 3: + inputs = inputs[np.newaxis, :] + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +def random_target_labels(true_labels): + target_labels = [] + for label in true_labels: + while True: + target_label = np.random.randint(0, 10) + if target_label != label: + target_labels.append(target_label) + break + return target_labels + + +def _pseudorandom_target(index, total_indices, true_class): + """ pseudo random_target """ + rng = np.random.RandomState(index) + target = true_class + while target == true_class: + target = rng.randint(0, total_indices) + return target + + +def create_target_images(dataset, data_labels, target_labels): + res = [] + for label in target_labels: + for i in range(len(data_labels)): + if data_labels[i] == label: + res.append(dataset[i]) + break + return np.array(res) + +def get_model(current_dir): + ckpt_name = os.path.join(current_dir, + '../../test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt') + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + net.set_train(False) + model = ModelToBeAttacked(net) + return model + +def get_dataset(current_dir): + # upload trained network + + # get test data + test_images = np.load(os.path.join(current_dir, + '../../test_data/test_images.npy')) + test_labels = np.load(os.path.join(current_dir, + '../../test_data/test_labels.npy')) + return test_images, test_labels +def nes_mnist_attack(scene, top_k): + """ + hsja-Attack test + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + test_images, test_labels = get_dataset(current_dir) + model = get_model(current_dir) + # prediction accuracy before attack + batch_num = 5 # the number of batches of attacking samples + predict_labels = [] + i = 0 + for img in test_images: + i += 1 + pred_labels = np.argmax(model.predict(img), axis=1) + predict_labels.append(pred_labels) + if i >= batch_num: + break + predict_labels = np.concatenate(predict_labels) + true_labels = test_labels + accuracy = np.mean(np.equal(predict_labels, true_labels[:batch_num])) + LOGGER.info(TAG, "prediction accuracy before attacking is : %s", + accuracy) + test_images = test_images + + # attacking + if scene == 'Query_Limit': + top_k = -1 + elif scene == 'Partial_Info': + top_k = top_k + elif scene == 'Label_Only': + top_k = top_k + + success = 0 + queries_num = 0 + + nes_instance = NES(model, scene, top_k=top_k) + test_length = 1 + advs = [] + for img_index in range(test_length): + # INITIAL IMAGE AND CLASS SELECTION + initial_img = test_images[img_index] + orig_class = true_labels[img_index] + initial_img 
= [initial_img] + target_class = random_target_labels([orig_class]) + target_image = create_target_images(test_images, true_labels, + target_class) + + nes_instance.set_target_images(target_image) + tag, adv, queries = nes_instance.generate(initial_img, target_class) + if tag[0]: + success += 1 + queries_num += queries[0] + advs.append(adv) + + advs = np.reshape(advs, (len(advs), 1, 32, 32)) + assert (advs != test_images[:batch_num]).any() + + adv_pred = np.argmax(model.predict(advs), axis=1) + adv_accuracy = np.mean(np.equal(adv_pred, true_labels[:test_length])) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_nes_query_limit(): + # scene is in ['Query_Limit', 'Partial_Info', 'Label_Only'] + scene = 'Query_Limit' + nes_mnist_attack(scene, top_k=-1) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_nes_partial_info(): + # scene is in ['Query_Limit', 'Partial_Info', 'Label_Only'] + scene = 'Partial_Info' + nes_mnist_attack(scene, top_k=5) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_nes_label_only(): + # scene is in ['Query_Limit', 'Partial_Info', 'Label_Only'] + scene = 'Label_Only' + nes_mnist_attack(scene, top_k=5) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + """test that exception is raised for invalid labels""" + with pytest.raises(ValueError): + assert nes_mnist_attack('Label_Only', -1) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_none(): + current_dir = os.path.dirname(os.path.abspath(__file__)) + model = get_model(current_dir) + test_images, test_labels = get_dataset(current_dir) + nes = NES(model, 'Partial_Info') + with pytest.raises(ValueError): + assert nes.generate(test_images, test_labels) diff --git a/tests/ut/python/attacks/black/test_pointwise_attack.py b/tests/ut/python/attacks/black/test_pointwise_attack.py new file mode 100644 index 0000000..7acd0f4 --- /dev/null +++ b/tests/ut/python/attacks/black/test_pointwise_attack.py @@ -0,0 +1,90 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
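The NES tests above share one call pattern across the three query scenes ('Query_Limit' with top_k=-1, 'Partial_Info' and 'Label_Only' with top_k > 0). Condensed, with the variable names taken from those tests:

    nes = NES(model, scene, top_k=top_k)
    nes.set_target_images(target_image)   # always set in the tests; test_none shows that skipping
                                          # it in 'Partial_Info' raises ValueError
    tag, adv, queries = nes.generate([initial_img], target_class)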
+""" +PointWise Attack test +""" +import sys +import os +import numpy as np +import pytest + + +from mindspore import Tensor +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.black.pointwise_attack import PointWiseAttack +from mindarmour.utils.logger import LogUtil +from mindarmour.attacks.black.black_model import BlackModel + +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../../../../")) +from example.mnist_demo.lenet5_net import LeNet5 + + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +LOGGER = LogUtil.get_instance() +TAG = 'Pointwise_Test' +LOGGER.set_level('INFO') + + +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pointwise_attack_method(): + """ + Pointwise attack method unit test. + """ + np.random.seed(123) + # upload trained network + current_dir = os.path.dirname(os.path.abspath(__file__)) + ckpt_name = os.path.join(current_dir, + '../../test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt') + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get one mnist image + input_np = np.load(os.path.join(current_dir, + '../../test_data/test_images.npy'))[:3] + labels = np.load(os.path.join(current_dir, + '../../test_data/test_labels.npy'))[:3] + model = ModelToBeAttacked(net) + pre_label = np.argmax(model.predict(input_np), axis=1) + LOGGER.info(TAG, 'original sample predict labels are :{}'.format(pre_label)) + LOGGER.info(TAG, 'true labels are: {}'.format(labels)) + attack = PointWiseAttack(model, sparse=True, is_targeted=False) + is_adv, adv_data, query_times = attack.generate(input_np, pre_label) + LOGGER.info(TAG, 'adv sample predict labels are: {}' + .format(np.argmax(model.predict(adv_data), axis=1))) + + assert np.any(adv_data[is_adv][0] != input_np[is_adv][0]), 'Pointwise attack method: ' \ + 'generate value must not be equal' \ + ' to original value.' diff --git a/tests/ut/python/attacks/black/test_pso_attack.py b/tests/ut/python/attacks/black/test_pso_attack.py new file mode 100644 index 0000000..1763580 --- /dev/null +++ b/tests/ut/python/attacks/black/test_pso_attack.py @@ -0,0 +1,166 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PSO-Attack test. 
+""" +import numpy as np +import pytest + +from mindspore import Tensor +import mindspore.nn as nn +from mindspore.nn import Cell +from mindspore import context + +from mindarmour.attacks.black.pso_attack import PSOAttack +from mindarmour.attacks.black.black_model import BlackModel + + +# for user +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +class SimpleNet(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = SimpleNet() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(SimpleNet, self).__init__() + + self._relu = nn.ReLU() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._relu(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pso_attack(): + """ + PSO_Attack test + """ + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) + _, adv_data, _ = attack.generate(inputs, labels) + assert np.any(inputs != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pso_attack_targeted(): + """ + PSO_Attack test + """ + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, targeted=True, + sparse=False) + _, adv_data, _ = attack.generate(inputs, labels) + assert np.any(inputs != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_inference +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pso_attack_gpu(): + """ + PSO_Attack test + """ + context.set_context(device_target="GPU") + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) + _, adv_data, _ = attack.generate(inputs, labels) + assert np.any(inputs != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pso_attack_cpu(): + """ + PSO_Attack test + """ + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + batch_size = 6 + + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + 
+ attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) + _, adv_data, _ = attack.generate(inputs, labels) + assert np.any(inputs != adv_data) diff --git a/tests/ut/python/attacks/black/test_salt_and_pepper_attack.py b/tests/ut/python/attacks/black/test_salt_and_pepper_attack.py new file mode 100644 index 0000000..bed4141 --- /dev/null +++ b/tests/ut/python/attacks/black/test_salt_and_pepper_attack.py @@ -0,0 +1,123 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +SaltAndPepper Attack Test +""" +import numpy as np +import pytest + +import mindspore.ops.operations as M +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore import context + +from mindarmour.attacks.black.salt_and_pepper_attack import \ + SaltAndPepperNoiseAttack +from mindarmour.attacks.black.black_model import BlackModel + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + + +# for user +class ModelToBeAttacked(BlackModel): + """model to be attack""" + + def __init__(self, network): + super(ModelToBeAttacked, self).__init__() + self._network = network + + def predict(self, inputs): + """predict""" + result = self._network(Tensor(inputs.astype(np.float32))) + return result.asnumpy() + + +# for user +class SimpleNet(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = SimpleNet() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(SimpleNet, self).__init__() + self._softmax = M.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._softmax(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_salt_and_pepper_attack_method(): + """ + Salt and pepper attack method unit test. + """ + batch_size = 6 + np.random.seed(123) + net = SimpleNet() + inputs = np.random.rand(batch_size, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = SaltAndPepperNoiseAttack(model, sparse=False) + is_adv, adv_data, query_times = attack.generate(inputs, labels) + assert np.any(adv_data[0] != inputs[0]), 'Salt and pepper attack method: ' \ + 'generate value must not be equal' \ + ' to original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_salt_and_pepper_attack_in_batch(): + """ + Salt and pepper attack method unit test in batch. 
+ """ + batch_size = 32 + np.random.seed(123) + net = SimpleNet() + inputs = np.random.rand(batch_size*2, 10) + + model = ModelToBeAttacked(net) + labels = np.random.randint(low=0, high=10, size=batch_size*2) + labels = np.eye(10)[labels] + labels = labels.astype(np.float32) + + attack = SaltAndPepperNoiseAttack(model, sparse=False) + adv_data = attack.batch_generate(inputs, labels, batch_size=32) + assert np.any(adv_data[0] != inputs[0]), 'Salt and pepper attack method: ' \ + 'generate value must not be equal' \ + ' to original value.' diff --git a/tests/ut/python/attacks/test_batch_generate_attack.py b/tests/ut/python/attacks/test_batch_generate_attack.py new file mode 100644 index 0000000..6855e72 --- /dev/null +++ b/tests/ut/python/attacks/test_batch_generate_attack.py @@ -0,0 +1,74 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Batch-generate-attack test. +""" +import numpy as np +import pytest + +import mindspore.ops.operations as P +from mindspore.nn import Cell +import mindspore.context as context + +from mindarmour.attacks.gradient_method import FastGradientMethod + + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for user +class Net(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = Net() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(Net, self).__init__() + self._softmax = P.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._softmax(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_batch_generate_attack(): + """ + Attack with batch-generate. + """ + input_np = np.random.random((128, 10)).astype(np.float32) + label = np.random.randint(0, 10, 128).astype(np.int32) + label = np.eye(10)[label].astype(np.float32) + + attack = FastGradientMethod(Net()) + ms_adv_x = attack.batch_generate(input_np, label, batch_size=32) + + assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \ + ' must not be equal to original value.' diff --git a/tests/ut/python/attacks/test_cw.py b/tests/ut/python/attacks/test_cw.py new file mode 100644 index 0000000..7135e9b --- /dev/null +++ b/tests/ut/python/attacks/test_cw.py @@ -0,0 +1,90 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +CW-Attack test. +""" +import numpy as np +import pytest + +import mindspore.ops.operations as M +from mindspore.nn import Cell +from mindspore import context + +from mindarmour.attacks.carlini_wagner import CarliniWagnerL2Attack + + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for user +class Net(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = Net() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(Net, self).__init__() + self._softmax = M.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._softmax(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_cw_attack(): + """ + CW-Attack test + """ + net = Net() + input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) + label_np = np.array([3]).astype(np.int64) + num_classes = input_np.shape[1] + attack = CarliniWagnerL2Attack(net, num_classes, targeted=False) + adv_data = attack.generate(input_np, label_np) + assert np.any(input_np != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_cw_attack_targeted(): + """ + CW-Attack test + """ + net = Net() + input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) + target_np = np.array([1]).astype(np.int64) + num_classes = input_np.shape[1] + attack = CarliniWagnerL2Attack(net, num_classes, targeted=True) + adv_data = attack.generate(input_np, target_np) + assert np.any(input_np != adv_data) diff --git a/tests/ut/python/attacks/test_deep_fool.py b/tests/ut/python/attacks/test_deep_fool.py new file mode 100644 index 0000000..74f94b1 --- /dev/null +++ b/tests/ut/python/attacks/test_deep_fool.py @@ -0,0 +1,119 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +DeepFool-Attack test. +""" +import numpy as np +import pytest + +import mindspore.ops.operations as M +from mindspore.nn import Cell +from mindspore import context +from mindspore import Tensor + +from mindarmour.attacks.deep_fool import DeepFool + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for user +class Net(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = Net() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(Net, self).__init__() + self._softmax = M.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. 
+ """ + out = self._softmax(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_deepfool_attack(): + """ + Deepfool-Attack test + """ + net = Net() + input_shape = (1, 5) + _, classes = input_shape + input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) + input_me = Tensor(input_np) + true_labels = np.argmax(net(input_me).asnumpy(), axis=1) + attack = DeepFool(net, classes, max_iters=10, norm_level=2, + bounds=(0.0, 1.0)) + adv_data = attack.generate(input_np, true_labels) + # expected adv value + expect_value = np.asarray([[0.10300991, 0.20332647, 0.59308802, 0.59651263, + 0.40406296]]) + assert np.allclose(adv_data, expect_value), 'mindspore deepfool_method' \ + ' implementation error, ms_adv_x != expect_value' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_deepfool_attack_inf(): + """ + Deepfool-Attack test + """ + net = Net() + input_shape = (1, 5) + _, classes = input_shape + input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) + input_me = Tensor(input_np) + true_labels = np.argmax(net(input_me).asnumpy(), axis=1) + attack = DeepFool(net, classes, max_iters=10, norm_level=np.inf, + bounds=(0.0, 1.0)) + adv_data = attack.generate(input_np, true_labels) + assert np.any(input_np != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + net = Net() + input_shape = (1, 5) + _, classes = input_shape + input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) + input_me = Tensor(input_np) + true_labels = np.argmax(net(input_me).asnumpy(), axis=1) + with pytest.raises(NotImplementedError): + # norm_level=0 is not available + attack = DeepFool(net, classes, max_iters=10, norm_level=1, + bounds=(0.0, 1.0)) + assert attack.generate(input_np, true_labels) diff --git a/tests/ut/python/attacks/test_gradient_method.py b/tests/ut/python/attacks/test_gradient_method.py new file mode 100644 index 0000000..bab0a04 --- /dev/null +++ b/tests/ut/python/attacks/test_gradient_method.py @@ -0,0 +1,242 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Gradient-Attack test. 
+""" +import numpy as np +import pytest + +import mindspore.nn as nn +from mindspore.nn import Cell +import mindspore.context as context +from mindspore.nn import SoftmaxCrossEntropyWithLogits + +from mindarmour.attacks.gradient_method import FastGradientMethod +from mindarmour.attacks.gradient_method import FastGradientSignMethod +from mindarmour.attacks.gradient_method import LeastLikelyClassMethod +from mindarmour.attacks.gradient_method import RandomFastGradientMethod +from mindarmour.attacks.gradient_method import RandomFastGradientSignMethod +from mindarmour.attacks.gradient_method import RandomLeastLikelyClassMethod + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for user +class Net(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = Net() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(Net, self).__init__() + self._relu = nn.ReLU() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._relu(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_fast_gradient_method(): + """ + Fast gradient method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + attack = FastGradientMethod(Net()) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \ + ' must not be equal to original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_inference +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_fast_gradient_method_gpu(): + """ + Fast gradient method unit test. + """ + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + attack = FastGradientMethod(Net()) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \ + ' must not be equal to original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_fast_gradient_method_cpu(): + """ + Fast gradient method unit test. + """ + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + + loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) + attack = FastGradientMethod(Net(), loss_fn=loss) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \ + ' must not be equal to original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_random_fast_gradient_method(): + """ + Random fast gradient method unit test. 
+ """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + attack = RandomFastGradientMethod(Net()) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Random fast gradient method: ' \ + 'generate value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_fast_gradient_sign_method(): + """ + Fast gradient sign method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + attack = FastGradientSignMethod(Net()) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Fast gradient sign method: generate' \ + ' value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_random_fast_gradient_sign_method(): + """ + Random fast gradient sign method unit test. + """ + input_np = np.random.random((1, 28)).astype(np.float32) + label = np.asarray([2], np.int32) + label = np.eye(28)[label].astype(np.float32) + + attack = RandomFastGradientSignMethod(Net()) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Random fast gradient sign method: ' \ + 'generate value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_least_likely_class_method(): + """ + Least likely class method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + attack = LeastLikelyClassMethod(Net()) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Least likely class method: generate' \ + ' value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_random_least_likely_class_method(): + """ + Random least likely class method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + attack = RandomLeastLikelyClassMethod(Net(), eps=0.1, alpha=0.01) + ms_adv_x = attack.generate(input_np, label) + + assert np.any(ms_adv_x != input_np), 'Random least likely class method: ' \ + 'generate value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_assert_error(): + """ + Random least likely class method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + with pytest.raises(ValueError) as e: + assert RandomLeastLikelyClassMethod(Net(), eps=0.05, alpha=0.21) + assert str(e.value) == 'eps must be larger than alpha!' 
diff --git a/tests/ut/python/attacks/test_iterative_gradient_method.py b/tests/ut/python/attacks/test_iterative_gradient_method.py new file mode 100644 index 0000000..8a0b580 --- /dev/null +++ b/tests/ut/python/attacks/test_iterative_gradient_method.py @@ -0,0 +1,136 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Iterative-gradient Attack test. +""" +import numpy as np +import pytest + +from mindspore.ops import operations as P +from mindspore.nn import Cell +from mindspore import context + +from mindarmour.attacks import BasicIterativeMethod +from mindarmour.attacks import MomentumIterativeMethod +from mindarmour.attacks import ProjectedGradientDescent +from mindarmour.attacks import IterativeGradientMethod + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for user +class Net(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = Net() + """ + + def __init__(self): + super(Net, self).__init__() + self._softmax = P.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + out = self._softmax(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_basic_iterative_method(): + """ + Basic iterative method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + for i in range(5): + net = Net() + attack = BasicIterativeMethod(net, nb_iter=i + 1) + ms_adv_x = attack.generate(input_np, label) + assert np.any( + ms_adv_x != input_np), 'Basic iterative method: generate value' \ + ' must not be equal to original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_momentum_iterative_method(): + """ + Momentum iterative method unit test. + """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + for i in range(5): + attack = MomentumIterativeMethod(Net(), nb_iter=i + 1) + ms_adv_x = attack.generate(input_np, label) + assert np.any(ms_adv_x != input_np), 'Basic iterative method: generate' \ + ' value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_projected_gradient_descent_method(): + """ + Projected gradient descent method unit test. 
+ """ + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + + for i in range(5): + attack = ProjectedGradientDescent(Net(), nb_iter=i + 1) + ms_adv_x = attack.generate(input_np, label) + + assert np.any( + ms_adv_x != input_np), 'Projected gradient descent method: ' \ + 'generate value must not be equal to' \ + ' original value.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_error(): + with pytest.raises(ValueError): + # check_param_multi_types + assert IterativeGradientMethod(Net(), bounds=None) + attack = IterativeGradientMethod(Net(), bounds=(0.0, 1.0)) + with pytest.raises(NotImplementedError): + input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32) + label = np.asarray([2], np.int32) + label = np.eye(3)[label].astype(np.float32) + assert attack.generate(input_np, label) diff --git a/tests/ut/python/attacks/test_jsma.py b/tests/ut/python/attacks/test_jsma.py new file mode 100644 index 0000000..ef76125 --- /dev/null +++ b/tests/ut/python/attacks/test_jsma.py @@ -0,0 +1,161 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +JSMA-Attack test. +""" +import numpy as np +import pytest + +import mindspore.nn as nn +from mindspore.nn import Cell +from mindspore import context +from mindspore import Tensor +from mindarmour.attacks.jsma import JSMAAttack + + +# for user +class Net(Cell): + """ + Construct the network of target model. + + Examples: + >>> net = Net() + """ + + def __init__(self): + """ + Introduce the layers used for network construction. + """ + super(Net, self).__init__() + self._relu = nn.ReLU() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. 
+ """ + out = self._relu(inputs) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_jsma_attack(): + """ + JSMA-Attack test + """ + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + net = Net() + input_shape = (1, 5) + batch_size, classes = input_shape + np.random.seed(5) + input_np = np.random.random(input_shape).astype(np.float32) + label_np = np.random.randint(classes, size=batch_size) + ori_label = np.argmax(net(Tensor(input_np)).asnumpy(), axis=1) + for i in range(batch_size): + if label_np[i] == ori_label[i]: + if label_np[i] < classes - 1: + label_np[i] += 1 + else: + label_np[i] -= 1 + attack = JSMAAttack(net, classes, max_iteration=5) + adv_data = attack.generate(input_np, label_np) + assert np.any(input_np != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_jsma_attack_2(): + """ + JSMA-Attack test + """ + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + net = Net() + input_shape = (1, 5) + batch_size, classes = input_shape + np.random.seed(5) + input_np = np.random.random(input_shape).astype(np.float32) + label_np = np.random.randint(classes, size=batch_size) + ori_label = np.argmax(net(Tensor(input_np)).asnumpy(), axis=1) + for i in range(batch_size): + if label_np[i] == ori_label[i]: + if label_np[i] < classes - 1: + label_np[i] += 1 + else: + label_np[i] -= 1 + attack = JSMAAttack(net, classes, max_iteration=5, increase=False) + adv_data = attack.generate(input_np, label_np) + assert np.any(input_np != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_inference +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_jsma_attack_gpu(): + """ + JSMA-Attack test + """ + context.set_context(device_target="GPU") + net = Net() + input_shape = (1, 5) + batch_size, classes = input_shape + np.random.seed(5) + input_np = np.random.random(input_shape).astype(np.float32) + label_np = np.random.randint(classes, size=batch_size) + ori_label = np.argmax(net(Tensor(input_np)).asnumpy(), axis=1) + for i in range(batch_size): + if label_np[i] == ori_label[i]: + if label_np[i] < classes - 1: + label_np[i] += 1 + else: + label_np[i] -= 1 + attack = JSMAAttack(net, classes, max_iteration=5) + adv_data = attack.generate(input_np, label_np) + assert np.any(input_np != adv_data) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_jsma_attack_cpu(): + """ + JSMA-Attack test + """ + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + net = Net() + input_shape = (1, 5) + batch_size, classes = input_shape + np.random.seed(5) + input_np = np.random.random(input_shape).astype(np.float32) + label_np = np.random.randint(classes, size=batch_size) + ori_label = np.argmax(net(Tensor(input_np)).asnumpy(), axis=1) + for i in range(batch_size): + if label_np[i] == ori_label[i]: + if label_np[i] < classes - 1: + label_np[i] += 1 + else: + label_np[i] -= 1 + attack = JSMAAttack(net, classes, max_iteration=5) + adv_data = attack.generate(input_np, label_np) + assert np.any(input_np != adv_data) diff --git a/tests/ut/python/attacks/test_lbfgs.py b/tests/ut/python/attacks/test_lbfgs.py new file mode 100644 index 0000000..649ea1f --- /dev/null +++ b/tests/ut/python/attacks/test_lbfgs.py 
@@ -0,0 +1,72 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +LBFGS-Attack test. +""" +import sys +import numpy as np +import pytest +import os + +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from mindarmour.attacks.lbfgs import LBFGS +from mindarmour.utils.logger import LogUtil + +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../../../")) +from example.mnist_demo.lenet5_net import LeNet5 + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +LOGGER = LogUtil.get_instance() +TAG = 'LBFGS_Test' +LOGGER.set_level('DEBUG') + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_lbfgs_attack(): + """ + LBFGS-Attack test + """ + np.random.seed(123) + # upload trained network + current_dir = os.path.dirname(os.path.abspath(__file__)) + ckpt_name = os.path.join(current_dir, + '../test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt') + net = LeNet5() + load_dict = load_checkpoint(ckpt_name) + load_param_into_net(net, load_dict) + + # get one mnist image + input_np = np.load(os.path.join(current_dir, + '../test_data/test_images.npy'))[:1] + label_np = np.load(os.path.join(current_dir, + '../test_data/test_labels.npy'))[:1] + LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0])) + classes = 10 + target_np = np.random.randint(0, classes, 1) + while target_np == label_np[0]: + target_np = np.random.randint(0, classes) + target_np = np.eye(10)[target_np].astype(np.float32) + + attack = LBFGS(net, is_targeted=True) + LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0])) + adv_data = attack.generate(input_np, target_np) diff --git a/tests/ut/python/defenses/mock_net.py b/tests/ut/python/defenses/mock_net.py new file mode 100644 index 0000000..663b5a0 --- /dev/null +++ b/tests/ut/python/defenses/mock_net.py @@ -0,0 +1,107 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +mocked model for UT of defense algorithms. 
+""" +import numpy as np + +from mindspore import nn +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore.nn import WithLossCell, TrainOneStepCell +from mindspore.nn.optim.momentum import Momentum +from mindspore.ops import operations as P +from mindspore import context +from mindspore.common.initializer import TruncatedNormal + +from mindarmour.attacks import FastGradientSignMethod + + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode="valid") + + +def fc_with_initialize(input_channels, out_channels): + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +def weight_variable(): + return TruncatedNormal(0.02) + + +class Net(nn.Cell): + """ + Lenet network + """ + def __init__(self): + super(Net, self).__init__() + self.conv1 = conv(1, 6, 5) + self.conv2 = conv(6, 16, 5) + self.fc1 = fc_with_initialize(16*5*5, 120) + self.fc2 = fc_with_initialize(120, 84) + self.fc3 = fc_with_initialize(84, 10) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.reshape = P.Reshape() + + def construct(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv2(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.reshape(x, (-1, 16*5*5)) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x + +if __name__ == '__main__': + num_classes = 10 + batch_size = 32 + + sparse = False + context.set_context(mode=context.GRAPH_MODE) + context.set_context(device_target='Ascend') + + # create test data + inputs_np = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) + labels_np = np.random.randint(num_classes, size=batch_size).astype(np.int32) + if not sparse: + labels_np = np.eye(num_classes)[labels_np].astype(np.float32) + + net = Net() + + # test fgsm + attack = FastGradientSignMethod(net, eps=0.3) + attack.generate(inputs_np, labels_np) + + # test train ops + loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse) + optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), + 0.01, 0.9) + loss_net = WithLossCell(net, loss_fn) + train_net = TrainOneStepCell(loss_net, optimizer) + train_net.set_train() + + train_net(Tensor(inputs_np), Tensor(labels_np)) + diff --git a/tests/ut/python/defenses/test_ad.py b/tests/ut/python/defenses/test_ad.py new file mode 100644 index 0000000..d90c853 --- /dev/null +++ b/tests/ut/python/defenses/test_ad.py @@ -0,0 +1,66 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Adversarial defense test. 
+""" +import numpy as np +import pytest +import logging + +from mindspore import nn +from mindspore import Tensor +from mindspore import context +from mindspore.nn.optim.momentum import Momentum + +from mindarmour.defenses.adversarial_defense import AdversarialDefense +from mindarmour.utils.logger import LogUtil + +from mock_net import Net + +LOGGER = LogUtil.get_instance() +TAG = 'Ad_Test' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_ad(): + """UT for adversarial defense.""" + num_classes = 10 + batch_size = 16 + + sparse = False + context.set_context(mode=context.GRAPH_MODE) + context.set_context(device_target='Ascend') + + # create test data + inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) + labels = np.random.randint(num_classes, size=batch_size).astype(np.int32) + if not sparse: + labels = np.eye(num_classes)[labels].astype(np.float32) + + net = Net() + loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse) + optimizer = Momentum(learning_rate=Tensor(np.array([0.001], np.float32)), + momentum=Tensor(np.array([0.9], np.float32)), + params=net.trainable_params()) + + ad_defense = AdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer) + LOGGER.set_level(logging.DEBUG) + LOGGER.debug(TAG, '--start adversarial defense--') + loss = ad_defense.defense(inputs, labels) + LOGGER.debug(TAG, '--end adversarial defense--') + assert np.any(loss >= 0.0) diff --git a/tests/ut/python/defenses/test_ead.py b/tests/ut/python/defenses/test_ead.py new file mode 100644 index 0000000..3001e24 --- /dev/null +++ b/tests/ut/python/defenses/test_ead.py @@ -0,0 +1,70 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +ensemble adversarial defense test. 
+""" +import numpy as np +import pytest +import logging + +from mindspore import nn +from mindspore import context +from mindspore.nn.optim.momentum import Momentum + +from mindarmour.attacks.gradient_method import FastGradientSignMethod +from mindarmour.attacks.iterative_gradient_method import \ + ProjectedGradientDescent +from mindarmour.defenses.adversarial_defense import EnsembleAdversarialDefense +from mindarmour.utils.logger import LogUtil + +from mock_net import Net + +LOGGER = LogUtil.get_instance() +TAG = 'Ead_Test' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_ead(): + """UT for ensemble adversarial defense.""" + num_classes = 10 + batch_size = 16 + + sparse = False + context.set_context(mode=context.GRAPH_MODE) + context.set_context(device_target='Ascend') + + # create test data + inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) + labels = np.random.randint(num_classes, size=batch_size).astype(np.int32) + if not sparse: + labels = np.eye(num_classes)[labels].astype(np.float32) + + net = Net() + loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse) + optimizer = Momentum(net.trainable_params(), 0.001, 0.9) + + net = Net() + fgsm = FastGradientSignMethod(net) + pgd = ProjectedGradientDescent(net) + ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn, + optimizer=optimizer) + LOGGER.set_level(logging.DEBUG) + LOGGER.debug(TAG, '---start ensemble adversarial defense--') + loss = ead.defense(inputs, labels) + LOGGER.debug(TAG, '---end ensemble adversarial defense--') + assert np.any(loss >= 0.0) diff --git a/tests/ut/python/defenses/test_nad.py b/tests/ut/python/defenses/test_nad.py new file mode 100644 index 0000000..a5e06a8 --- /dev/null +++ b/tests/ut/python/defenses/test_nad.py @@ -0,0 +1,65 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Natural adversarial defense test. 
+""" +import numpy as np +import pytest +import logging + +from mindspore import nn +from mindspore import context +from mindspore.nn.optim.momentum import Momentum + +from mindarmour.defenses.natural_adversarial_defense import \ + NaturalAdversarialDefense +from mindarmour.utils.logger import LogUtil + +from mock_net import Net + +LOGGER = LogUtil.get_instance() +TAG = 'Nad_Test' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_nad(): + """UT for natural adversarial defense.""" + num_classes = 10 + batch_size = 16 + + sparse = False + context.set_context(mode=context.GRAPH_MODE) + context.set_context(device_target='Ascend') + + # create test data + inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) + labels = np.random.randint(num_classes, size=batch_size).astype(np.int32) + if not sparse: + labels = np.eye(num_classes)[labels].astype(np.float32) + + net = Net() + loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse) + optimizer = Momentum(net.trainable_params(), 0.001, 0.9) + + # defense + nad = NaturalAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer) + LOGGER.set_level(logging.DEBUG) + LOGGER.debug(TAG, '---start natural adversarial defense--') + loss = nad.defense(inputs, labels) + LOGGER.debug(TAG, '---end natural adversarial defense--') + assert np.any(loss >= 0.0) diff --git a/tests/ut/python/defenses/test_pad.py b/tests/ut/python/defenses/test_pad.py new file mode 100644 index 0000000..f4ee0ad --- /dev/null +++ b/tests/ut/python/defenses/test_pad.py @@ -0,0 +1,66 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Projected adversarial defense test. 
+""" +import numpy as np +import pytest +import logging + +from mindspore import nn +from mindspore import context +from mindspore.nn.optim.momentum import Momentum + +from mindarmour.defenses.projected_adversarial_defense import \ + ProjectedAdversarialDefense +from mindarmour.utils.logger import LogUtil + +from mock_net import Net + +LOGGER = LogUtil.get_instance() +TAG = 'Pad_Test' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_pad(): + """UT for projected adversarial defense.""" + num_classes = 10 + batch_size = 16 + + sparse = False + context.set_context(mode=context.GRAPH_MODE) + context.set_context(device_target='Ascend') + + # create test data + inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) + labels = np.random.randint(num_classes, size=batch_size).astype(np.int32) + if not sparse: + labels = np.eye(num_classes)[labels].astype(np.float32) + + # construct network + net = Net() + loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse) + optimizer = Momentum(net.trainable_params(), 0.001, 0.9) + + # defense + pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer) + LOGGER.set_level(logging.DEBUG) + LOGGER.debug(TAG, '---start projected adversarial defense--') + loss = pad.defense(inputs, labels) + LOGGER.debug(TAG, '---end projected adversarial defense--') + assert np.any(loss >= 0.0) diff --git a/tests/ut/python/detectors/black/test_similarity_detector.py b/tests/ut/python/detectors/black/test_similarity_detector.py new file mode 100644 index 0000000..255a58b --- /dev/null +++ b/tests/ut/python/detectors/black/test_similarity_detector.py @@ -0,0 +1,101 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Similarity-detector test. +""" +import numpy as np +import pytest + +from mindspore.nn import Cell +from mindspore import Model +from mindspore import context +from mindspore.ops.operations import TensorAdd + +from mindarmour.detectors.black.similarity_detector import SimilarityDetector + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class EncoderNet(Cell): + """ + Similarity encoder for input data + """ + + def __init__(self, encode_dim): + super(EncoderNet, self).__init__() + self._encode_dim = encode_dim + self.add = TensorAdd() + + def construct(self, inputs): + """ + construct the neural network + Args: + inputs (Tensor): input data to neural network. + Returns: + Tensor, output of neural network. + """ + return self.add(inputs, inputs) + + def get_encode_dim(self): + """ + Get the dimension of encoded inputs + + Returns: + int, dimension of encoded inputs. 
+ """ + return self._encode_dim + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_similarity_detector(): + """ + Similarity detector unit test + """ + # Prepare dataset + np.random.seed(5) + x_train = np.random.rand(1000, 32, 32, 3).astype(np.float32) + perm = np.random.permutation(x_train.shape[0]) + + # Generate query sequences + benign_queries = x_train[perm[:1000], :, :, :] + suspicious_queries = x_train[perm[-1], :, :, :] + np.random.normal( + 0, 0.05, (1000,) + x_train.shape[1:]) + suspicious_queries = suspicious_queries.astype(np.float32) + + # explicit threshold not provided, calculate threshold for K + encoder = Model(EncoderNet(encode_dim=256)) + detector = SimilarityDetector(max_k_neighbor=50, trans_model=encoder) + num_nearest_neighbors, thresholds = detector.fit(inputs=x_train) + detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1]) + + detector.detect(benign_queries) + detections = detector.get_detection_interval() + # compare + expected_value = 0 + assert len(detections) == expected_value + + detector.clear_buffer() + detector.detect(suspicious_queries) + + # compare + expected_value = [1051, 1102, 1153, 1204, 1255, + 1306, 1357, 1408, 1459, 1510, + 1561, 1612, 1663, 1714, 1765, + 1816, 1867, 1918, 1969] + assert np.all(detector.get_detected_queries() == expected_value) + diff --git a/tests/ut/python/detectors/test_ensemble_detector.py b/tests/ut/python/detectors/test_ensemble_detector.py new file mode 100644 index 0000000..28d306c --- /dev/null +++ b/tests/ut/python/detectors/test_ensemble_detector.py @@ -0,0 +1,112 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +EnsembleDetector Test +""" +import numpy as np +import pytest + +from mindspore.nn import Cell +from mindspore.ops.operations import TensorAdd +from mindspore.train.model import Model +from mindspore import context + +from mindarmour.detectors.mag_net import ErrorBasedDetector +from mindarmour.detectors.region_based_detector import RegionBasedDetector +from mindarmour.detectors.ensemble_detector import EnsembleDetector + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(Cell): + """ + Construct the network of target model. + """ + def __init__(self): + super(Net, self).__init__() + self.add = TensorAdd() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + return self.add(inputs, inputs) + + +class AutoNet(Cell): + """ + Construct the network of target model. + """ + def __init__(self): + super(AutoNet, self).__init__() + self.add = TensorAdd() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. 
+ """ + return self.add(inputs, inputs) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_ensemble_detector(): + """ + Compute mindspore result. + """ + np.random.seed(6) + adv = np.random.rand(4, 4).astype(np.float32) + model = Model(Net()) + auto_encoder = Model(AutoNet()) + random_label = np.random.randint(10, size=4) + labels = np.eye(10)[random_label] + magnet_detector = ErrorBasedDetector(auto_encoder) + region_detector = RegionBasedDetector(model) + # use this to enable radius in region_detector + region_detector.fit(adv, labels) + detectors = [magnet_detector, region_detector] + detector = EnsembleDetector(detectors) + detected_res = detector.detect(adv) + expected_value = np.array([0, 1, 0, 0]) + assert np.all(detected_res == expected_value) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_error(): + np.random.seed(6) + adv = np.random.rand(4, 4).astype(np.float32) + model = Model(Net()) + auto_encoder = Model(AutoNet()) + random_label = np.random.randint(10, size=4) + labels = np.eye(10)[random_label] + magnet_detector = ErrorBasedDetector(auto_encoder) + region_detector = RegionBasedDetector(model) + # use this to enable radius in region_detector + detectors = [magnet_detector, region_detector] + detector = EnsembleDetector(detectors) + with pytest.raises(NotImplementedError): + assert detector.fit(adv, labels) diff --git a/tests/ut/python/detectors/test_mag_net.py b/tests/ut/python/detectors/test_mag_net.py new file mode 100644 index 0000000..13916ca --- /dev/null +++ b/tests/ut/python/detectors/test_mag_net.py @@ -0,0 +1,164 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Mag-net detector test. +""" +import numpy as np +import pytest + +import mindspore.ops.operations as P +from mindspore.nn import Cell +from mindspore.ops.operations import TensorAdd +from mindspore import Model +from mindspore import context + +from mindarmour.detectors.mag_net import ErrorBasedDetector +from mindarmour.detectors.mag_net import DivergenceBasedDetector + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(Cell): + """ + Construct the network of target model. + """ + + def __init__(self): + super(Net, self).__init__() + self.add = TensorAdd() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + return self.add(inputs, inputs) + + +class PredNet(Cell): + """ + Construct the network of target model. + """ + + def __init__(self): + super(PredNet, self).__init__() + self.shape = P.Shape() + self.reshape = P.Reshape() + self._softmax = P.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. 
+ """ + data = self.reshape(inputs, (self.shape(inputs)[0], -1)) + return self._softmax(data) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_mag_net(): + """ + Compute mindspore result. + """ + np.random.seed(5) + ori = np.random.rand(4, 4, 4).astype(np.float32) + np.random.seed(6) + adv = np.random.rand(4, 4, 4).astype(np.float32) + model = Model(Net()) + detector = ErrorBasedDetector(model) + detector.fit(ori) + detected_res = detector.detect(adv) + expected_value = np.array([1, 1, 1, 1]) + assert np.all(detected_res == expected_value) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_mag_net_transform(): + """ + Compute mindspore result. + """ + np.random.seed(6) + adv = np.random.rand(4, 4, 4).astype(np.float32) + model = Model(Net()) + detector = ErrorBasedDetector(model) + adv_trans = detector.transform(adv) + assert np.any(adv_trans != adv) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_mag_net_divergence(): + """ + Compute mindspore result. + """ + np.random.seed(5) + ori = np.random.rand(4, 4, 4).astype(np.float32) + np.random.seed(6) + adv = np.random.rand(4, 4, 4).astype(np.float32) + encoder = Model(Net()) + model = Model(PredNet()) + detector = DivergenceBasedDetector(encoder, model) + threshold = detector.fit(ori) + detector.set_threshold(threshold) + detected_res = detector.detect(adv) + expected_value = np.array([1, 0, 1, 1]) + assert np.all(detected_res == expected_value) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_mag_net_divergence_transform(): + """ + Compute mindspore result. + """ + np.random.seed(6) + adv = np.random.rand(4, 4, 4).astype(np.float32) + encoder = Model(Net()) + model = Model(PredNet()) + detector = DivergenceBasedDetector(encoder, model) + adv_trans = detector.transform(adv) + assert np.any(adv_trans != adv) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + np.random.seed(6) + adv = np.random.rand(4, 4, 4).astype(np.float32) + encoder = Model(Net()) + model = Model(PredNet()) + detector = DivergenceBasedDetector(encoder, model, option='bad_op') + with pytest.raises(NotImplementedError): + assert detector.detect_diff(adv) diff --git a/tests/ut/python/detectors/test_region_based_detector.py b/tests/ut/python/detectors/test_region_based_detector.py new file mode 100644 index 0000000..c958749 --- /dev/null +++ b/tests/ut/python/detectors/test_region_based_detector.py @@ -0,0 +1,115 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Region-based detector test. +""" +import numpy as np +import pytest + +from mindspore.nn import Cell +from mindspore import Model +from mindspore import context +from mindspore.ops.operations import TensorAdd + +from mindarmour.detectors.region_based_detector import \ + RegionBasedDetector + + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(Cell): + """ + Construct the network of target model. + """ + def __init__(self): + super(Net, self).__init__() + self.add = TensorAdd() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + return self.add(inputs, inputs) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_region_based_classification(): + """ + Compute mindspore result. + """ + np.random.seed(5) + ori = np.random.rand(4, 4).astype(np.float32) + labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], + [0, 1, 0, 0]]).astype(np.int32) + np.random.seed(6) + adv = np.random.rand(4, 4).astype(np.float32) + model = Model(Net()) + detector = RegionBasedDetector(model) + radius = detector.fit(ori, labels) + detector.set_radius(radius) + detected_res = detector.detect(adv) + expected_value = np.array([0, 0, 1, 0]) + assert np.all(detected_res == expected_value) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + np.random.seed(5) + ori = np.random.rand(4, 4).astype(np.float32) + labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], + [0, 1, 0, 0]]).astype(np.int32) + np.random.seed(6) + adv = np.random.rand(4, 4).astype(np.float32) + model = Model(Net()) + # model should be mindspore model + with pytest.raises(ValueError): + assert RegionBasedDetector(Net()) + + with pytest.raises(ValueError): + assert RegionBasedDetector(model, number_points=-1) + + with pytest.raises(ValueError): + assert RegionBasedDetector(model, initial_radius=-1) + + with pytest.raises(ValueError): + assert RegionBasedDetector(model, max_radius=-2.2) + + with pytest.raises(ValueError): + assert RegionBasedDetector(model, search_step=0) + + with pytest.raises(ValueError): + assert RegionBasedDetector(model, sparse='False') + + detector = RegionBasedDetector(model) + with pytest.raises(ValueError): + # radius must not empty + assert detector.detect(adv) + + radius = detector.fit(ori, labels) + detector.set_radius(radius) + with pytest.raises(ValueError): + # adv type should be in (list, tuple, numpy.ndarray) + assert detector.detect(adv.tostring()) diff --git a/tests/ut/python/detectors/test_spatial_smoothing.py b/tests/ut/python/detectors/test_spatial_smoothing.py new file mode 100644 index 0000000..fe7669c --- /dev/null +++ b/tests/ut/python/detectors/test_spatial_smoothing.py @@ -0,0 +1,116 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Spatial-smoothing detector test. +""" +import numpy as np +import pytest + +import mindspore.ops.operations as M +from mindspore import Model +from mindspore.nn import Cell +from mindspore import context + +from mindarmour.detectors.spatial_smoothing import SpatialSmoothing + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +# for use +class Net(Cell): + """ + Construct the network of target model. + """ + def __init__(self): + super(Net, self).__init__() + self._softmax = M.Softmax() + + def construct(self, inputs): + """ + Construct network. + + Args: + inputs (Tensor): Input data. + """ + return self._softmax(inputs) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_spatial_smoothing(): + """ + Compute mindspore result. + """ + input_shape = (50, 3) + + np.random.seed(1) + input_np = np.random.randn(*input_shape).astype(np.float32) + + np.random.seed(2) + adv_np = np.random.randn(*input_shape).astype(np.float32) + + # mock user model + model = Model(Net()) + detector = SpatialSmoothing(model) + # Training + threshold = detector.fit(input_np) + detector.set_threshold(threshold.item()) + detected_res = np.array(detector.detect(adv_np)) + idx = np.where(detected_res > 0) + expected_value = np.array([10, 39, 48]) + assert np.all(idx == expected_value) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_spatial_smoothing_diff(): + """ + Compute mindspore result. 
+ """ + input_shape = (50, 3) + np.random.seed(1) + input_np = np.random.randn(*input_shape).astype(np.float32) + + np.random.seed(2) + adv_np = np.random.randn(*input_shape).astype(np.float32) + + # mock user model + model = Model(Net()) + detector = SpatialSmoothing(model) + # Training + detector.fit(input_np) + diffs = detector.detect_diff(adv_np) + expected_value = np.array([0.20959496, 0.69537306, 0.13034256, 0.7421039, + 0.41419053, 0.56346416, 0.4277994, 0.3240941, + 0.048190027, 0.6806958, 1.1405756, 0.587804, + 0.40533313, 0.2875523, 0.36801508, 0.61993587, + 0.49286827, 0.13222921, 0.68012404, 0.4164942, + 0.25758877, 0.6008735, 0.60623455, 0.34981924, + 0.3945489, 0.879787, 0.3934811, 0.23387678, + 0.63480926, 0.56435543, 0.16067612, 0.57489645, + 0.21772699, 0.55924356, 0.5186635, 0.7094835, + 0.0613693, 0.13305652, 0.11505881, 1.2404268, + 0.50948, 0.15797901, 0.44473758, 0.2495422, + 0.38254014, 0.543059, 0.06452079, 0.36902517, + 1.1845329, 0.3870097]) + assert np.allclose(diffs, expected_value, 0.0001, 0.0001) + + + diff --git a/tests/ut/python/evaluations/black/test_black_defense_eval.py b/tests/ut/python/evaluations/black/test_black_defense_eval.py new file mode 100644 index 0000000..4cbd586 --- /dev/null +++ b/tests/ut/python/evaluations/black/test_black_defense_eval.py @@ -0,0 +1,73 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Black-box defense evaluation test. 
+""" +import numpy as np +import pytest + +from mindarmour.evaluations.black.defense_evaluation import BlackDefenseEvaluate + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_def_eval(): + """ + Tests for black-box defense evaluation + """ + # prepare data + raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1], [0.1, 0.1, 0.2, 0.6], + [0.1, 0.7, 0.0, 0.2], [0.8, 0.1, 0.0, 0.1], + [0.1, 0.1, 0.2, 0.6], [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1], [0.1, 0.1, 0.2, 0.6]]) + + def_preds = np.array([[0.1, 0.1, 0.2, 0.6], [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1], [0.1, 0.1, 0.2, 0.6], + [0.1, 0.7, 0.0, 0.2], [0.8, 0.1, 0.0, 0.1], + [0.1, 0.1, 0.2, 0.6], [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1], [0.1, 0.1, 0.2, 0.6]]) + raw_query_counts = np.array([0, 0, 0, 0, 0, 10, 10, 20, 20, 30]) + def_query_counts = np.array([0, 0, 0, 0, 0, 30, 30, 40, 40, 50]) + + raw_query_time = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 2, 2, 4, 4, 6]) + def_query_time = np.array([0.3, 0.3, 0.3, 0.3, 0.3, 4, 4, 8, 8, 12]) + + def_detection_counts = np.array([1, 0, 0, 0, 1, 5, 5, 5, 10, 20]) + + true_labels = np.array([3, 1, 0, 3, 1, 0, 3, 1, 0, 3]) + + # create obj + def_eval = BlackDefenseEvaluate(raw_preds, + def_preds, + raw_query_counts, + def_query_counts, + raw_query_time, + def_query_time, + def_detection_counts, + true_labels, + max_queries=100) + # run eval + qcv = def_eval.qcv() + asv = def_eval.asv() + fpr = def_eval.fpr() + qrv = def_eval.qrv() + res = [qcv, asv, fpr, qrv] + + # compare + expected_value = [0.2, 0.0, 0.4, 2.0] + assert np.allclose(res, expected_value, 0.0001, 0.0001) diff --git a/tests/ut/python/evaluations/test_attack_eval.py b/tests/ut/python/evaluations/test_attack_eval.py new file mode 100644 index 0000000..daee550 --- /dev/null +++ b/tests/ut/python/evaluations/test_attack_eval.py @@ -0,0 +1,95 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Attack evaluation test. 
+""" +import numpy as np +import pytest + +from mindarmour.evaluations.attack_evaluation import AttackEvaluate + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_attack_eval(): + # prepare test data + np.random.seed(1024) + inputs = np.random.normal(size=(3, 512, 512, 3)) + labels = np.array([[0.1, 0.1, 0.2, 0.6], + [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1]]) + adv_x = inputs + np.ones((3, 512, 512, 3))*0.001 + adv_y = np.array([[0.1, 0.1, 0.2, 0.6], + [0.1, 0.0, 0.8, 0.1], + [0.0, 0.9, 0.1, 0.0]]) + + # create obj + attack_eval = AttackEvaluate(inputs, labels, adv_x, adv_y) + + # run eval + mr = attack_eval.mis_classification_rate() + acac = attack_eval.avg_conf_adv_class() + l_0, l_2, l_inf = attack_eval.avg_lp_distance() + ass = attack_eval.avg_ssim() + nte = attack_eval.nte() + res = [mr, acac, l_0, l_2, l_inf, ass, nte] + + # compare + expected_value = [0.6666, 0.8500, 1.0, 0.0009, 0.0001, 0.9999, 0.75] + assert np.allclose(res, expected_value, 0.0001, 0.0001) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + # prepare test data + np.random.seed(1024) + inputs = np.random.normal(size=(3, 512, 512, 3)) + labels = np.array([[0.1, 0.1, 0.2, 0.6], + [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1]]) + adv_x = inputs + np.ones((3, 512, 512, 3))*0.001 + adv_y = np.array([[0.1, 0.1, 0.2, 0.6], + [0.1, 0.0, 0.8, 0.1], + [0.0, 0.9, 0.1, 0.0]]) + + # create obj + with pytest.raises(ValueError) as e: + assert AttackEvaluate(inputs, labels, adv_x, adv_y, targeted=True) + assert str(e.value) == 'targeted attack need target_label, but got None.' + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + # prepare test data + np.random.seed(1024) + inputs = np.array([]) + labels = np.array([]) + adv_x = inputs + adv_y = np.array([]) + + # create obj + with pytest.raises(ValueError) as e: + assert AttackEvaluate(inputs, labels, adv_x, adv_y) + assert str(e.value) == 'inputs must not be empty' diff --git a/tests/ut/python/evaluations/test_defense_eval.py b/tests/ut/python/evaluations/test_defense_eval.py new file mode 100644 index 0000000..97fee9b --- /dev/null +++ b/tests/ut/python/evaluations/test_defense_eval.py @@ -0,0 +1,51 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Defense evaluation test. 
+""" +import numpy as np +import pytest + +from mindarmour.evaluations.defense_evaluation import DefenseEvaluate + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_def_eval(): + # prepare data + raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], + [0.1, 0.7, 0.0, 0.2], + [0.8, 0.1, 0.0, 0.1]]) + def_preds = np.array([[0.1, 0.1, 0.1, 0.7], + [0.1, 0.6, 0.2, 0.1], + [0.1, 0.2, 0.1, 0.6]]) + true_labels = np.array([3, 1, 0]) + + # create obj + def_eval = DefenseEvaluate(raw_preds, def_preds, true_labels) + + # run eval + cav = def_eval.cav() + crr = def_eval.crr() + csr = def_eval.csr() + ccv = def_eval.ccv() + cos = def_eval.cos() + res = [cav, crr, csr, ccv, cos] + + # compare + expected_value = [-0.3333, 0.0, 0.3333, 0.0999, 0.0450] + assert np.allclose(res, expected_value, 0.0001, 0.0001) diff --git a/tests/ut/python/evaluations/test_radar_metric.py b/tests/ut/python/evaluations/test_radar_metric.py new file mode 100644 index 0000000..f93ef57 --- /dev/null +++ b/tests/ut/python/evaluations/test_radar_metric.py @@ -0,0 +1,57 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Radar map test. 
+""" +import pytest +from mindarmour.evaluations.visual_metrics import RadarMetric +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_radar_metric(): + # prepare data + metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'RGB'] + def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8] + raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7] + metrics_data = [def_metrics, raw_metrics] + metrics_labels = ['before', 'after'] + + # create obj + rm = RadarMetric(metrics_name, metrics_data, metrics_labels, title='', + scale='sparse') + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_card +@pytest.mark.component_mindarmour +def test_value_error(): + # prepare data + metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'RGB'] + def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8] + raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7] + metrics_data = [def_metrics, raw_metrics] + metrics_labels = ['before', 'after'] + + with pytest.raises(ValueError): + assert RadarMetric(metrics_name, metrics_data, metrics_labels, + title='', scale='bad_s') + + with pytest.raises(ValueError): + assert RadarMetric(['MR', 'ACAC', 'ASS'], metrics_data, metrics_labels, + title='', scale='bad_s') + diff --git a/tests/ut/python/test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt b/tests/ut/python/test_data/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..5264090b30ef689038217edf2eab704f30387273 GIT binary patch literal 494163 zcmY(KX;hA1*!DHg(x6dA$&iXd>fYzROJpchN$9W4LWU$nhBOP!R7#~((oCrPI`?g; zq>@5tAf(Y)gowPY^{)4Q*7N=Pbgg6W{X382IA#Be%gS%wyXT;p>7lJVJhuBNe-xLN zlopYekd{!ETCi*HMxUAHT212r&3w%AUSD9zV=QUm>R##-aDw{&6A|j}y3W+Lsn8QO zZIoyk^3`|EW62_g80K3-KYljiFI1D_d;T$FuZ$+)+Z(+!T2G#DywifOFL{JT{wu+i z)AQ(C#RfXXtDZ`~wV`{(C-LtOALQGa8u4Y7XV8CZH}fY%ckstvA7dT3Ld5!w?G?VH zX9U0EuC(=XI}__~veT^(D*xhJUex6)9e&7Plc8We(NEUeRHcD0{HrarnHWo-r5xc8 zs0Q)}92NMEQ%u>6Z+8)`lR2l)!LU%Po7(XB>8kfvIHL*M z=+eYsYL(akdR>k*PcxYaW|-0G?ek15~KLzmxYw}9V$avlH4QZK%qlr4Y1 zVjHbV(B*sG(C1J4XTzT{U79~DH;OJa(E$ZJCHh+>8iI?=xCfJGVgYY0eUSH*-WyJ$ zgSywLNWw{)9CelE7)MesSpglgKR_S7OQ%_Te?qCjck)Qm4gSsK(^sw@)WYA8-QzD~ zd9vP&cZ!Ea;6+ZviwRvT-ZQnFrq2}v$sZ&@Yrm+}lo`cP*-VH3t3UJqr@uG{=`kY;{9pR>ci1TX5(*=VxIG!^xZOyd z<|mm__ovxlEBg;Gh@|4c^0zeP!x7+j+oR*k=j_UmK8?9Kp1*C1H7ws6$TsSi;^v4( z@Lu_zUYp#Er;S}$h_5Y{=+306sW07ldQmav7K@m3h0cBYnriO+Ldyeg zG70J@-0-KD8cZ?gKlL6>hc+G*Dl%_j2Riee4)XcELpRZ=&_UQQbDmvTGoL?i-38iH zY03Qd1~L=Z8i-39&zD~4$y~EmpxXf-z@OW>Uxh>PB2f$qooadOCR?+FPrLEorD!hy z?31!a9Tjxh{SABS#c5Hc8s(NR#$9UfiF}qisr>mEZ}yqd<3a9Rg0B+wIy;#j4SWHT zE{$-wI35EYiL+bJw!tF(lW>~1f<4tyrV;xB=|289NVntI$EjJUI7gn|Fe;=XGP77N z{|B?(IGU<&tEZ#pFCv|CXW4($k1gL2OpbRQrOWgTATE}$sV}r48v>}ALMH6obpfj# z3$Z{ggPRbb40|7qXTit1NXfKhTqZpV+Ikltm`SnF&*C^iJA$0Q`V_qqMcFr92^P4& zi6_aA#`e4~m@=&xH(fAfVLJ5?DQd@dI_faqm;cy{(e20z^{I}x zE@yf&Z{Jv5X691E<=hCz`2~Dt!%ybItTNF3QvA;)2JQ?S*@WUp=kT*^$ur}u+UG&LRfDaT_$wJ$6< z_MU949ulmW)rzv!AIPrYXLM(LAAPsenJ#q@XCFQ0fbZpKD#ZoRd$X4_-(x3PtoXoY^G?Kk^rG|2_Q)Ha9O&OIaO(S!6G6v0qTOq_+@mNUPI1D`&AK?(^8Sr}
zBonMYUO5LRYR5sVlRiJ^egZvGr1-PWqnNi^hITDVLzi=TBI&H-%-^pQWt^2USJirf-}o;X&rE*|pZ=_e=L?r&SA;F> zANd)k%S*CN_PJnDbOa~dIm@#7J$PJVUY;E`2&U|cV!5xpX>o}<^k3~kdUpbSb)*Xh z~fQE zsC+L;mA4*cPv?cfQkR>kl+q>MKNrQJH+~X{pi7XHdLI7d9S{t14a{xRBiy!hB))fU zWOENZWY2omphH0n+}Iw63y-~lAJ!rsAW=;cCV#-oI`yFBA*}mBlZeD_5l>!ppKK^m z!Vy=u!0E0-uzH{_vtQ{Bo7M?T+~~zrJ6#^loK8^lPT~AnWQY|9#mrCr80x&PfM1im zVP8@vjvTs~|7|G72VTyEyY7VPs)-~mcRFN$D#aON&*R8w6|QU&j_0))7h#e-5hy~T0TxyzK8vXzkwn*6}9PVV)N&0c1%ly&*LMhn$kV!vH1)tZ{CsZO)k9A zNSpU9Nr$=DPr%cdB3QdI3g;xeA!7#0lGfjOk4p5+$;P?W2JG>FO}LW`<}!Ug;uFJ-VcCvUv}>!ys6|x}XQYVvyK30C5mhYb zt0FnK^2e2z7c9xqBpoVUrH^)X<7l2kDr_(lYF7rcsm64D@qmUZ@L91Mtj4C0g4Z{R z&*Tn)m&^j|^O9Wd?g?<&Vg~JVzKK<=B+1a?9_U-`CSLVn1f6Q}A2UB2176r6viLO> zW|bGiX{{S@yRDws88dYFc9&6uw_yDGKHIN06sNt+!o+dY@Xgx-cH+o=*qhk{-7)>* zUiY6&yEz;U%B*R|=!x`l#AeviFp>`#8-Nj4hSKYg=kQwfTVR)=#GS)(N!FQYGWm!! zZJ%!eDW^TL$Z!J`YMn zA_tFLZZgM&_icHA5w?ze?cpo9c;jLisBOxyh^@L;=OY#|qPt(oW1E_wwX?c-lGi*$r!K>m5L?+Y!1MMesX>E-H9_XzE zeSaAU-f9C;P2WWYBQ8N(+j2Zt8jcQIT`~UTbMf?UIo>(Y5wAZm!XFRXvFSf+=r0O` zy7QmN;+A<3RZs&N0*>HLrOFsiMgL2rdk<3&1+_(AAtSf-iqOV$ri!o-&L1vaqQ=zUN= zI-DuT=+V~?Dyi>THyYy^2L2oN!PCSUbilVm)bD;Mje7MG!hSx3g-I1K8B9<{Lkiz- z(ExElHcq)afGbpbV(Hh1;+fiS#91$G`JWeAxbad3Dz$Ir2Lq|7>^0?j-}3mUkGII1 zv5xp(g8}&_m5l+D=J3bMR&tl0eHdaf1THK;g}L$%JMh#7JN!_i4}#ugm#n+l4$~(;RyC`Fwhi9u!Og33$Lvs4E=XC928n zgGV8kV9MXIOx^SjX>XYg)Ai#;cK=PJU(J=tX8VDB-FtT)o1sD*J^JCy<2#uAU>3jm zH<%aitR|nv=);HX0sQR55j;HM2|C%|N4JPDqB?mY`pg(l_3q}71#z*=+;~s<^}jvr z$Q4T}HF!B~+AWLg_oOjHUq5t>A4zQnI1?$IgSh5{FG_6}Gnt45;?Y*sFyquf`eRBi z^ejlmBlj&JCUp$|INXM8_&JmMRO*pKwIPsheisJ+kcH=pd!SpbTdXCwh2(0x(uP^i z82fi5_>9bEw|2dQ09A9|k*kVU*QUc?vjtrKoi=t~55r2`1Xg|tu&>dIb(|5HS)r%@ z%3?U*glVLyO_@97Zvw*#2_AKL58Sd6_+w=Ww&zDa8>H`ti^q+nftlg(!EOfn>TjVd zFMTK3?av?{2lCjTmV9OOb!h#pPib2+_GAl&-#B%c+{pukVRO<=DzjQHv7WT_)$&7_~mPQ?M=u1F29wd2$F zJQ!xb5@Ung#8)dc=)?mPagt&iyQH`PTsusO?}sj&K1!X&O*)Bcv(#aAN2hpf#Axo; zRS9Rzu2$Qg%J0 zh8@>cr5}}>;l%0*eBZWh!d)+mRTg;R%9izT*hd!|qYe3;pu^-8Sq(3jUc#fbk8ra? z0VZ6Opbg@mpxh(idgIpdoA;AZ?s+8IW4rj-#4qg2ogVgg+%J}qy&pG5{Sq7W9>K%@ zi!e4aOavUF!?6P?94ijh6U9ek^QK|43|I^H3qcm+dQlgx?$u`IVA< zaw0783i*oo1r_*Ub=OuPa`&=9sWvcM_+idx$K(1hXfp z!pO4&$^Dc~PUqXg!>acUUNLHW%UWiP_BfoWP=rUqiv_-=Y;==g6WvW?ZlEE!wZI zh8NCeu=>Dr<`Q+5o%|PxKf%Ot&&yweFym40do6} z;&C<{l+hRHr%LEr4!2cIh{MB2XT zAh9MH-1m-zQ3kKzh+I6}A9VvdU;c*4wz*K4KLR~LpBGO4i8IdSk%t0Hjl=(g?=|~qKQiI!m0VQsmSk0{vSCo%LV94GE-z_G5-lC9427Dlu)_L1=zSF|T1Tsx z)>3Coe&ozn2|e?u8M@pks!kLkq?l#ijO1hf%0g$74Xl5DRj3ioroLMO*|j$ZS%rr# zPR-SUjUP_3ip~)t%L|@XXK%Md{VXkNJ1-b-S53g4Lp}I@zbkBbU_>uXx(`SH302N@ zXTiVW1+gD8k=p7j(uPm5q#*4%p$)5GsK;OmAL7~S^TX)$nhe-1bDt$>FM;-oMqp=; zgMssJc38ody-P}mqUbOp)tw5EZHE5eKBGmbBhjkP!;>m22z7}dcl{rTdzZ<;IDZqu z$4rLG_%?XwY5>_MkL=5Ba3?ituMD}wjsH~7MG2wvB3fKP>;c&PUhOq{QV)h*Gi<3S!%lIkUwuT7_` zrp|(Yni23M?F1ZDt3pxX?ebC&!D8UF8*Of%As0$@U|fGaT%L3S9EH1rhi5E`7VX3K zgKwbS@{MRiUN@|)t0bx3&rmhKx7_QNG*OgSAur5A*ur>Y?6$lO{b{SqHyR|dbF>+D z-(>J6bpS7oJd69SZLm^a3U>;hzxT8^`P24+?6+Hq<6p?~pW|xD{H%GvU+6MtZCO5X zw7_R2UM7X%F*v;80_bi~r#r4KVTJGZ!$+?-;H`L^Pbl)GervmdkCvKb%9H;V4P)Wh%{o3Y~U4sdN#q)k#wX1Z`uR3UK5GMw5y^OWOFE?fGKasp`S|;HZd zxqkf3TI`b1rM?cI*2-Fr?^*>czj+CK^N+&of2-iIwHox=H?sM{eraeHi(4dxcU9sP4iC&n!*9W? 
z_`mJswpJ9Z(>wt^A8JI|gS%MY`zP$h)&P{8wUN195;B!?mBeG$1$KPRCAK6h0BaIL zajZ~pEz8p-BfccT{q|rKnRT$aW9C4fXcL5vGRAXuw=qU|cRu^e@{jcqIN@?7#A_zv zS^bfqkXcPG-v1*KJ-G%BQ5KjJEWEc*>cBBD07eW}#L>Y z3QS`LXB$X*dL4P|B%Heo_JPKvC{R!BAtOI`qh)Cn2^lnss(VFZ>F@gT-iKe=+R|TS z*>aIceyAOD@INDViq9h2=*`kkH^Sh)xv(mBE4;C{Az_h2amIuF*s$fS&_uToJ8u2J zRw=(H-l5Ylwx*GUANK)Ur79M6GLl`q8;y<=QXyT)k~&PTg@ohHLRQcd(hmK_(ZBRb zjpbwFd-3_yI^R~%8C=eE)t)o0w)5mn?-Ddo782pNhw-Syk-RoBkr|HY5hX6`XQ$4O zB|B`A$ilwOIN8mdB@elcD{MxhDqXH$O9e&QiHw8PuZ{A`!Jo`QwjRvG{|=%m?z>V=4hYZGiIg)3JSgEsLF}10#c_*=*CN`1X@v5zoj36EX=Ge6xaLg$DdKNeR4%uzRU{(DdS>ice38}CVU(if2KU{!?pCrrnu zg4oRzZ0?g<@aum@@Xl=kYhHgF-h7l0wY?CD9#7)pFdI4AYNE!suMs1u`zZdoTAG;3 z--YShydx`m5Y6}^=VS>%_F?@Nu z3;xaO0R2l7;q>!)AQSQkBbVjED1|d5*6SDSa*)R-=LIHWpdAj}S!cCK+=G(E!)e9D z{o*e#$`o%mTK2SG$n< zZ((*#q3nV0Am~Zy6q5Lo;I9~d^;23Tc)u_pej`eVa_U`Xd_$;%w>Dw-NGmZj`i5rV zA?${h9z8f`1$y28j?3%klSzVYp!HS|T^f4^Hn?5j-EW>?=|^>%(O3>`XN8*Cp>T18 zg9BH8lE|zt1@hO)!L(EOeq!g(1`Br?ZXu;kpB3d(?-{`mf8>~W`^w#7^H6PE_;5Q; z*s=!)ocu#{h5A7808?l)!Zm+>rc6SJ|RH8#h2*!FzXVlQY zg9p#w6Kx!oB$ipXmSlRDvDNJz(6b>#Z1^Pa4^BB~vtV*nlhUNzt4=Z?OBeIXt>w0b<1p?A&~jc=a%( z(H4B(bXR^t{1FwCmFeI3OtxrB3LJY=gy|Dbfkm|xcbcTYi@$FI7mZ3lY7e)|hoSjj zWo8?@464^BV2#%~P#7_hNvsIQ`Qd7Oj{h0xSiT44UeClMc4uIUbS0|@G{yaMV&^A~16ItAWZSupOGz*)+! zM`9QT>f`qimE@WDi5&xN%_?&6%nib<4wEF)Yw+x6FswLdNLzi6GZM(&Zl9F?v5Sa?d3*HOE7g z!$;yg_B|G*9us-lREkm^?dcq?VW@p{2<@46jr47-!Rc%VEvdUt3KriM)s?!7A2BC! zno>QUJU*U(_V|d4ohR_$!K+C1!Dw-5$RsLPbq?}UoyqR9YuJCmfOiRN{gbIj?i>?v{4&JSEqaF0dh53eyn{EOKJs=?PI zkudDxN3dzrM<4ejMz18}h4wh{jSO!gdxY%%>2pG__5+LR-${nQ`-ysggDtPpRFYgo z(JL)p?{kw1NUQL&H}-tWO@O`O&m>w<49ZCG^%TsgA>+yvPzdYoUOYKUwlg- z{Us`xl}a&j^)&v;MjI!~ZHLG?=gHveXs8^snLI3?gF`$QVrg8t<&`%NXG;-5Wyf4%y+h-A}=*T#oiKJGxFjh0XRk z&mLc|MK#Z6EKBpoAG(Qxb!;xLSIfd%qzWXn4$wgkDtKv$G&m=FaF$kv`)20DioMe~ zYc=B;hmZ4_`)2U)yIjbbECu6)mHd%l^B&jUEuPRN1zUeqLtv3TkCqoR!^H^8-7|5H zt_&ZwekX6P(&CXU8Z>2Ii;mrHfz5I!tn}6eqSBN&R_;qkT)}Nv{cR+d-Ic&D&#V$X z&N>OB<#I(2O_~HFg*z(Smy%deG$YTbArB2S>*QdB;vib3%0fuff2ifr7n@8Jp0j@@NSx5 zxUPYsNK0t`;eb2GSHtkev+!Y!h&BrB`EVh(vHaI(A&b(D&1*L^i$kMf=IS}X#v`oF z9Zi4SddME;?0{|Gvfy9jJ1`G_07tt8!{AbVh)eKc6Iu-5oZ1|8_q~ZPeXL2-FrLx5K=Q2avijpDptXN8oxE=g&K?zq4a^%~AC17l)BPatSF0$> z_#!6q9MD{4i~nsHiVs#hQD37_xTdoWf;Jw)qb@S^p1~d1vT-&|U)~DQQSPYGWe7)_ z^=RtAp|q+w3U^iYvcQnrKz6Q&*@>UxvZ;+GJ?g*_hFF2G58v&$mebz z4oiYY@c|FV@a)_?@;EhEblg+utz9;P(%{uZEVU~{I$E1s%pJt<9C1K*CfFs9N`mdud7$%d7GylAMrh85QKTO> zp9x^E0yOA?rIIk>_$}CZ`wAStT?YrpjG_m7g+1oSG~v8bjAB)7a4Nh?dY4#0NA6zM zO@^|VmTQAd8m^Oe*2@_}mL8TX59 zRrf|ANn+)G?;GqKlnDO`o)eGc&A2!%2E9kSQ`?(^g^mX*psRazqT{gg>g{5TYwrNx+ zp&C+>{*VJzHLUw`HF-F1BhHw!6cn{yaL)Qtxmn_-YiurZ21Uss4YWxK@9nvtt;< zzBl5-wWmSUyfBO_Q|75UGw|)E&6ruc2{c)zs3L1Fgm1qKsvjHJ@LGB7(f&&!?yP{( zE<>SVnE_5)`WUupH=;PA9FO-q|Tce1;F9A(nCE&*vu?W+VjLzxs)b zGbq|sMZ@07OQ>071a!BQGG*yx(V};Pk>6lBU94OH2kK|SfLGGgMt3OKs!Q`@Y#v(} znoYT2HT^g*hF?58ja!!5&}V(7aHC`kx#5?{%{Lw9>z`=SwkLUPR-i8pklD(|7rA2T zu{ijzIS6WNW9fvn628_%n6z;B6gjeT473Eaz!Kvcymo6i%37+kQ*GgRaE&GIKG4jf zFX&SCJ)UA?OzuEHH4ETy5?ER%>wrf5)wbTf-ypmDek>?Wq~O&e9f5 zAd|!smx-9&c@qvQm%w~OC=L~R9l<&IxTf?P7{-KQlIXJ7`f(xl+t)#J-8NWSR*KQb zGC}vX4~@54MNgHR(D7qqVaN4Y`14keHtdnatLK&y>+8S8&Zi9d!bpbiBGzH+umVu_ zm*WFpR6}IN2l%R^L}i1&!i&XeWP(9D%OM?u zH`8%$G5GFY3V14LaGzzmFey|_p2&~JKjwNoYugvN>@$$=9T>=}X9zq>nmug(eG+m) z(@|$!8U_XmjH=Qp^ilO9dP@bj!HyMhE47(a+wH}1Q)a-~xeuW&z=YoI9R?BX5iYLG z#P3&+!jNe#L^ZvFh+gai-*`)Y`?weXoG}xB-7doo4ZrdFc~!BCm%s#9S+d)m!SJTj zmHO!>K+B?8;<{%yAj?UQFTDAc%^qpM$80#w^AGCb&278*Ad5h(6Yk$1ZzB5k0MA@} z6Zpx|)NrL0L}~`&X=i|Au1?T-E(Pl}hR~bs?NsW#2#$N}FhkR;u)jj!_#5T<$|q^K 
zW8Q7F{CI%qd7DGc<}ED3MPQ}}E`qGHmsr|S>c@6Nxwsf4{GmzSC9!#XR+Jb83Ju*JNO8hBzGFN;gL$^j}qKD5G zPz#mAjXDU0`}Od|`5aU@lW3J#V?j-XbxOOun?-)=6lbbjgDBfWu(s5ctQ> zYSW3J!S=&*mwt9m`K2g5tdD#=EyK^wy8<7T45%*e5*gTECY!A#`OEBnG<&DWH|L3A zl-F4L-RcJP4c^W3>Ld94FH^b1oiU*2vw?mZBh)IB<`CDXggSId(w&>u)1FC#sf(in z&BFo#MWj7JpRz{WxFC#Q$WK83ZPqx&Erzd?+`>H%RAYiw8{T0nQF+HS zUNEDS-rFBS17dGr!jz@uV}>rJdu+7nX#aj#pwkPA!#;wU^K>{==Eq!zPoj|zce9N; zZoE=D3ogbLVew^W9#`>?Njx}CS5_KQwH3u=&mV36T<~p0d|N_YpSa=tg6oiyIfNS6 zALd?O^4u-v6In3%9(jLu7;M_yUw-YD6wK{7N_!)%S-_75@S8Ca$bc5K+R}z&ewL8< z-*ai?(Ijpr8&*Cl+={xM+({p{MZn)%gLz1@2LI)=kL=yi2ebSF==_kST%oQ6hUWf- ziy?FAhZWoDu34wq+e?~|T;+mWpS%FyRT)%z)g6|)rxmB2`H4?fR>0VCn%L4ahAzG{ z2X%Ha@-WnZ-;obQ$d(pqZdaq(Yv=LduZHqt&Fe(_#%kh&l!qdJxheSg@IsCo)cC(9 zDO^e)Fwv<>T)8iSC}k?*<`I=JyYvh`44#7%7pbzW8{e(IJlliY1+!vP%p_cxUPwBA zO`tE5Mx#Iy;-fhZaP-3*Fs&F*%f>a4$xhL5B61mhnI4UE2Dgx*Hd@q0`z;7wImlRO zgVzg1;2(JlLm#=5#U>OY#}47!z6|0^x+bvt8THT^zK8THLGwg#{|I)dCBbo zo-W=+mhU!z`J;rsvUQ($W=0B(DyjsTj=Q9IhYU>ku!rtU8AH3wrNzrjo}#kD8**-k z9IN@B4y$`N(o=e=BBRjtV0%H1_NTPKo495O^iN~bQKM)A@h1vGPOnXRH;!=XfKi76 zA#G(o9Qf?OXGVU<0k7Oa@|F}myE}l&F3TaB4-EK;gbX|v>5oH_lj!TgcW{i;1^##6 zMotB5c$oJI8hkRGY!=*jd7aT%>^BSM7b?Krc}FnoN)X2EI7=*wG*JA^9X>~FgRTfo zh`K%uTbf6rW{?ptUKYqUE*y-%z8%LMOGfbt7D+Jw><}t(H5+=YrMbN1IAP*%47}SA z41T+(u!GgbczvZGjI&wH$7x64vZlFA-RCfTscFF(^(MGbIT9N@WSBziQt<{ofrEZE z2S>*KM)N-aV>=y1N5p_9yk)Ro(-V!4*yG&2TB1i8Qt(1BssBnc#|mLGjpC{*@$54a zSZSTZ?)1He>3;@sOaByXs2Io0zGsPFUY6#UMHj_Wrs+Zd2_^cd=?q+HIlxvIN}z}K zKQdjWhTYRoVm7l2*>W{Qx?blNF1_1`&yKXSL1*5G;>UI1yy?BH_Nf8)zxYV-ZMm?D zWfnNs(H$q1q=4qzi^4RrHoSQ7BWQ3{u5j{`_;%8INOT`e*OytNrJFjwN`0ys`+z=9(&WFNt04C36;uw(gmXnQuydgu4twm+ ztkzD13{ftcZVqD8Dk88uX(8U))5Xfy+{HnMAEV3Q-*Dg{#W9PT@U%n(Og|mXYTRw% z%Y?1OOY#2OuLT-a#=%6M0yd#~pwZu5zOT%TO9{N#=7jyAx%&+X zo~K3+_?nQit#vHJ&H+aCRKv=g(-7D_8^)bBwwm1+D41ajae=J}pTEC^y~n&TQ_+=o zWiA2(VGz>I@m;v{P6ad;=P)M|U07(YCG>qQ;QNPL@cCUgkg^A`a+?};`~4m+yGUch zMQIFM`4m0=DpFth4j6xT7yo{>5Pdv*NzuwA8rS=rMZRCiHyH?yzWb-}U`0IEHx1-} zMk=9#bR!w@*qW!L+v5p~dc09T9eP$s;oFmkG2+*JsLkmnNe17@;QU#5Qn)7$xO0@c z%6icX6>Dfal}6(HoZ!l?8N4=Ki{H(d%Jn?2z}fc$Ik9))+VcAR)z54(r| zx*uU&eg~G;sh9g4o5>f38Szb)Ip}8~qGo%r{Lf6kLcnk%lAuZK#{L3x=j6q)0QvgW{!clT^5IAE%b~fvQ8o}mE++^X{Lu_&&4)WGC9L@2 z2T1tO9rlEc;0d38iLPbOK$XJzutFyh*8dua3fF=_X2x;QyXuK8+MS@^K8e2yz5)L8 zR>RqRVTy{WD$aTPOXS+N4(i_jWG5A`5N0+GVmo(}cSTXma#@AQ(RUbc_mzU2)*R*@ z=!&yzii;iShb<}0=kY1*kZWg^iQb`?A= z^C2gGx52z!`t(y~ADi0cjDCUx^xMSwpn0ws7q&*AuFo29m^UAaXE_LyG}N&z>bdBU z=|}9SP~o4iEF`JxWw?n!iumURWB5v_;PGt1E00rf(&h&77O!45&pw;Q9nCDCF@7Ak z{ZouRb;qIArxtcKoA61q4R9>ng3)Pa^xUC0@byY4Bvf5ui>vzucBvTu_2=N!CsUb1 zVmQQPH<4ZgeQvn$Eq?6|gKwV<;i?9Zq=rngGra*XuNwe)I;~dzr)q?An>sHwi52aS z>jts31T4+n389Xvw8K!1Xud3D8V6&E^BM_mr7;dPi87khCS&4EDKNUd7E*IApviV= za93=@(DYTr`}ZlLe6x+!9t=eDjL+q!)!(g()t{0?%ip5&W}Aq+Ssq+(+$uhKSE%9C z8KbM`8tC6A!!JA$!6A7GzTe?E+31l?j2*|rETKQ&^kOYP(BcEXv!QnHeHF*- zSVIS_6YlN@MN~sW#9ol*EAUG<&|lN@oF|P&KuV@jm0fd z(dFkASAlJoa7G9fCQ!MZ2G_C4VE#Lfgnd?`aiN2_RpK~2VpPm(S6v6Cg0Jw{XE)5z z{wkJ`(1GAVRnU?Ag#6t21pG!AP?02`i8@TMT1k|m01Ts3u_uz? 
z)ktgfjX4O_;$t!Y`7_~P&V-xlu^V|H6coiDB~xEbp~5TOf(N5xFh}+*HGk6p7m}U| zvY$Bd*31DIAksk7P1&#wVfn|sxIY$wkbPQ%h24Bf7CbG)eM_H!_n|zjNLLap9TkPq zyTlNW$+Em1Q}EZ^Xmt3}i`RAgiMQ=``qDXz(|YJLM$s9mBrzrm_#I^)XkBouf7jhuYH9*$qXtr45aHcr}T2qV^x!YtfGU3zp`?$>yH zdqY5#$_?Sb!DKAAd`)aeUL~(gPGPR*3iLSDh2BsB?^>J5T!KgXT_q(FaE2KC$=4rR+OfokkXE-vmBXi3`PQjcF)a&w_z&cP1I6pN-rFOF!u z%YpT_?&u%!n|L&9vYIJLKt7#8;*bC{7Jb00Mu*8j&l=eCVn0+TDsY*HPT-hoNls6V zk;cj#YSo-gt9Q$Tm)#`p?G80mo+ZQBya#BpAfC^TFR;%ESq%phifG8&S)@==564W^ z;S67B;U}N50-@Xuys4H#UoGJ?XFT8LL#Qp+#`h?^TlVuTFbT;1@c;^)Lm=Nf7);H@ zNzi*qZo!N&;-2}BObKnn(fyHBqbn48zgUBW*i|fA6C_Z0P=>9U={Tjn0^WY!hh;X> ze3rBWov#^jgR=3&;@)w1A29?Dd#d3sf7cjUDatzdZ~M+fZFcW~E_ZmO8s|A)iEFkQ z!@QnW(Y@NCbh+a#ZcJPt7Z8)ks`t4v<2^Sp@ohUtw=Sk}<22Y>i$_@E{sl?83y#R2 zE`W)}P^|qDPjz+R#M!D?cl!n@dU%$EJvM{oGxkvVc{kC@Ta2r2@`U2ADy%@SS16mk z2x5~*z{^}wTu|77E4giGUhK}F8S31d%ViK$=Etw1_1TGracF4$3nfC&*}dpC<31Ir zvyCJ9+^pt!j5-zw|Ah9CARmu>a}AGa2GjW~*Aa!~-jJf-*%<5*gY2323Vh|XzhBw|_ zpl!r+^ZIi6dGkH|u=)$MnNH+%yHx3zr(IOBf}bzO%tWR9RD9=E0|%PJK_kk9oo@{< z?l|epEtztM3S(sG$W%j4*}4w;m9pVwf+F`wBAediaPf z`+5BnbU%Jf@=o$bp;i02DHw!KzHTs~Nsse+%zyvj4R__f#g^x1to^YV+URy*^z1i)HLv@?AFWnGTa$#oO2RMbJm_YWP5+ z3Uk#qkevq_p<|*K){HWO>qEccUy{{4sP zB$y*?=I_IeCc_}I>?t&qzClaLX#Dr$4f5b>tnKvTijT!Iy>}WI=lcj8CzWtwJag23 z>`#{QLW=!$>_fkC$sm|)%M>GikcWS?Szz2PoDdmF72IN=yY4Rh79&jd^agxcJexR| ztbpKs2-}_cPLSpTsxe{&*UekBzq~n0ZUshS_8}eU(SHlOcRIu3=|Q--?@8DQNFtsE5>YxcY`F)qF?GB3duoX zIRDW%SRbO!Mk{wwagzwTY-cT6&z51grDwpX(9)8YBVWKJG>n|j^{18fVyw^h8c`ax z1I|xN!IObikh5kt`4?4$7Uq9oi<=2|yfGQq^#?<4%qgCAa{+FQib13HJd)p5i7Ai0 zNzeQn)Yg9q%*v`mTVG$8Q7cLU`JT~H;Z9ijU7JaMDkE}-%W2Yt8Qh8w8PGBy!Y=9@ z<2@6LE+RJ)q~Ph* zN$l9!b$Dx5HvCr9;*P9*48}>3;PZ;_1nh3Ymk~VtSp>2Mu}w}$i#V$J-B68JCxcvFztpuVWW&Ti)dZQTOzJ8*don-+K7>g9qs6J zaagE!#1|e7s&dcvZ{&7N-U0<{p4gX_KLRC1Gw$L*0w?ibI?0h*igONT;`y0I7-;SV zMFrl1yK9y4#9mj{AY#m1{`&*6?-r3K9la>q=*XNRrs4Go*HECj8U&_>Y}uhq>@WHt zkac*5TMx=|R)beD-9toh=SL5&&P~U-gBozdAq!PLdO^KUIPPwg=0e^&3GZ#@nHqMI z=zTjK3uaj2({&rUxwUh-*&b@JWX@OAGJS{h=H`IIF$qpZq@5DaJXFkYrQ0Q|@XU?^ z>h^Lq_C9Zff)Z1h(e)I!ZOeyz>u6%Mcpju4F~+V)b*#zWf<7-pY2n-q;w8V7>(GMh`6vKH%y&q3pn6LeNwBRaKxfa#~~3aztL zSOql!t!ooF=bFi=prnAk%SX_X=1dZQE(XXNMQ&!-bNYKo%6M&x7gAufk*J_@3+AoMu*lIpXy;8}YUqfTtX zlHM|?oADg-&3;1rjT)?cE)BRh49A=Eb9sq3Ls7DzP;mD_2?%!1;!=xV;^d(Ss$Dmg%Myfxky<{E`*aN1+QUdao1iggQA)+Io-s}0i8`do2< zBudQJg7Xiv@%b(j?wW8m&9IK(IsL)VcVh*{j68rl8qVS}2%+n0(uJe`jl}^E3v5^B zbA+*%F-fDI-l`hGo{N>xf#>dULnlQbcj*esHeWz?risd@ouvCEA7T9?E6!JX1ot)U z3alwlK}a0ORPH>*V@fSJbM8OfZKBNd{G;gbPHP-(tjNslZ=##dO#W{if)9=ne5P*% zuDLm%yUW|qkNP*mJ-=0)8K1+;GI(aMJjGeqJV6X~+9bJh_i2z>D8-g;n!x7gEMV4L zGz>O0(&CAMu&T+K#EOgIv?KPMg*R{`P6?<;xeItrX%IG-%3}#sWBTE15M1WSJ)C_9 z+&K}}Zg~|VE~|6$r}jZS|4lr+`48DE_8LBv8*ppnyx^MqAQsG8$!!=F1{Uk%VDWJo z&P7xOBJQcNXr)-NF*3l2w{aAP?t&b3VkN&k@n7>o&c$5;*~Cq_ii==tX*h;xg}}b# z?ag1DPDr|q{4{`4IqbL!Mt<68-s{pT*Z>NW+ZEd2-m4XNN< z6OFoCD(Urr*+R40RQm700a!UDhu^W?rY)X3;a#08&mBmFnR`?Dtn4easTLK4{&$M5 zoje&g>NwI5&F=)aVGSN`y@W-9)8RNa@2)z=k*9L1eThmu((LtS;z~m7byaN+FL< z`Fsn66Q*%@6wbh)w;p@5T+#mSm78d$FUf^;#=(gX4aEDHE)*-sa1u{{;l94(Ft}cq zY2SGTr^DBh@E6aKwEf1Zo6~T=xjW~j)&)3MnG?)1z@cVB=R8C^ry;dcO4+7}F^1Y2&S!D+o~$8Ofjh-|RCF z-K1Ap6}Sl`S--RgUex*kxkdIUc217{N%c~y+fT}pW9xj)u&kyUp9CpGDHntvzfR@P1by_rphVXPy%i28Y=^M< zcJNGg3@k}nL3C7=xXYwMuAL%mo#7Ic~Fg z5#~Rx#ARC_68o!HA=$baIX-J{@bLk;ZsdmhGSZ;x^BC^I_-%M9@d6&cnGefFs)^zC zV<4uahg(JR$WSlL`tv z3$H*=he<2RG1-D#`Xl2G8lD@2<*oUG(0Q}rQo$$eSS8N3$S~62E5ThUvB&CwMEom= zBPxsIF-33_WA)1Mg1;ore{&pC)FT8a;Z8>9U!qF4wxA?81Dvy_!olFl#Ir+%D^Iyb zwqKcmu|~3x`BDgBk<(Esuo~x0%%|OY|6#0CGJJcZ0KJfo`R2ta>+gY^W@_TTwj7M& 
z=d!k6ThTLDg%-~pL5oJ|!|PWP%<^z_2F!WfQZx@s|(N?CSVCJS2!Em1u6 z5ViIcBZrp!he72Na9u$Nb+J8)_f<3LBn35C@I;?!NSqb)X-}cZGi|3$vB6@SXs|K# z!3V;}VB)A&B5Bn{7ySK2%72_gr#@xYxX%Pf4QHWbxg;C*y@gAE{}5dFatF_Sb4m1f z0dJ$`cV$c8kWnUA5Jh#-r2Ha1;HJVIeSIIrJ5N)uwI!s+{JzjI+=ZAfn}^*o&*7Ja zF>2lPfYwq4wp~&kw+3jzP%Xi~zjzy~-7jHz<2Ces@(>hjs_5P_TL`eDiRGsB7uMo2#R6PhK+G zoU!4#%`0(H@)f$e*#|7Um7r#|GfeIr0bRr0#N0)jjSqb<@Vn6huPR;fcFaHF+rlCI ztbU)=wT|QtZ`8n~tZwp_?=n6Mo_zsZaj;7QabROoFracTa7Xs4M@Q4^PoAj1fIP-At>H*1%++^xT^57y}?_K~VC=U<* zbd-p=B#^`3HiKn@9K@4If&Yzg``WK#Ns@a7nJ+D%qMOfv%)DASGE$5+uFQZTlOPEF ztHJ(R&!Db(3*hk$H=*jGQxG*uf^+hDM{Rdz3Oz?@GQ(6KGVf~;6s*?}7OmxHWdAtq zwx5J+Jo!FTbtEd5@r)+37FZS3Mc!VlusdRxO?@jyb1l>V!u|hR1lp%nz%aZTt0N~t z@zYl*$WcKH&qxv`@eNx_cq4E!??yS|2U+`@$Ulc?uprF_lise!cWrv`Chs7oA8p6W zO(9Sr8$+LZP<(Q^1Qw_0;a-qaQH(UEF58qx8x?m46&8)Cx{Q6993X0eC8?Fu!WU7EqI*zgJ}KEB6kCtu*0Qdj(b<~(iM_X#&G=z>p46|i{u9C)$GpUG5m!k}|~ z@P4ij79MXDC{3A!B3IN&V=$ z`XBic9*8?NE^wN;DV$A_EbnuY;|4Xo;7pJ)m%URNX7vOKO%7DS3L3#Nb$fK@`|wMW zzoFvBAWSSgK}bK%MDa9R{N3~sC%zBI zh|P;J%qJ9uP01v_>LmKk;&=3<5@J)g;rr~1m?|BC_v8A=@;yoLFi(dw^85<%9>%zE zC>idQW}uI(4`klmiE*nRLEH&ZjvftU_b+V2TaAjG?9gHEr>6rEpX(v~IW2~YXpSVi zr){OLkB`9b$Ayq07s8!1IzuN^uVG=CrTA*eTwMNzzgPXyV)YX@vY^arH07

Y2aL z^4>nS=cP0Yyw?nGMDEelKab!q*3#r|zR%Gi0S^5aU`^)?ZuLZ6Zu&JPF5W_e+tn+J z->Q?ymTD(YwTvpF7_i+osDa75FGbB!mS(a3~yDe*|2vSw(Phq__?r@%iU+m z^*4mU=Lj<{kh*hHs`FT2Z<&3iZANI|`W)LGtNFgD8kevB3(jbGqjY8fSjtCn^QCuj z-^xD`hyGi5_P8S3cVIUs+I*ICoMwopjxtzysS;%!`(VM_dQ4t3k2mXHf%C6N;@7l^ z_`C5k9^Uy0wRLlF=(sFQ^ibd)ylkSMHhZA=gh{Yu`7ADXSw6Z&HBp&WvuW>}`LOuV z9?Z^vfY)O#!w2XoYRdrUmPQb=cUjjT!b^7%6D1KvcUDf|8VTG zUGS}bkWADZ%?^!CpmS#7a zrI33X3+wO6aZAUEa+4Z-V8q~Yn}3Bz;o9{l@U$v}#wYdC2eVe<-6D7LC^!k%9(##i zPGjNX@}sad{4(wwW5s4@y1_V|g&gctuNqD(N9_t{4`n!bT5J(1uZcBI2dB{xnmaS?`OC&AmJCLn3HhW(p(57zndcaA)DPRwna zaPMqUmZ#E!q8eG;j}=86j@Dpi_0FuPax66RY|ztIFZt~FNX(Y71mow&vA7*@crHWD ze*qZyVl{|8e$6-1-oVw)GjK>^JIr5u4x42^;P56hm|?L3SFddj6Ft_erFr1%ER5-{az9qM{D#oT&Vrkf6fF3${R}FJu4W6~M3auW z=G@G-Fz#{D1~e5@!e9RKOk+j}8=Ciz3S*~0bju7}%+I~vc1FNOFOF0AJQ`HGHMsDa z3Gm~$CRFA|16yZ3&-Hx^D?aMn6Htf+=j{ zKYdJG!=dEvr5L-+irKlWCP^}zxf!<{(9hF^PH8j+x!@=wk#Pg+Zi&F23VYUaUYs?> z889_Z6*lb(Pkj{$Vy|}?unlTcn9`ha?5l7Bh#NM;{!vG0`Rz(5-tz+Drg<{SRcpBT z+)U`VJjr=jBy!sx$x^M{Sp2*8E>`iq@7Pxo*gS0{_cZz)|BRRDYEM+%LwoYg+`eZi(3aEQzj@ z7iBwy6WJR573_7NIIHy>$+C6Z>3h4wV6e#&%Jl2(Jipzhi$rhY^e2ySyHq3z-j+eq zi%ej<$V!1{w=8OJyhs#P$Kvde-(cmCMXHKj$-{ywcoDrAdh6Wy?rk|yslAJKs$TLYwa=1gEcAo2VHf5m)nSLr zA&ky#7rd8LVj-I?*!g|@&VQdWOUU_*0Y+2txl9Z`mPjX05BEXCV|_08*Z4erW+n#O zyhq_rF(y%U4ez;!(r~L@cr&+9_~fq^%Q#*Gd;coHiE=}3-?}X@X46glx-JS_)&<~T z#TX`!CrIhiRxratL&(NIf|tq;h7d@?L3Y0ClUd-?eMNDDSae1bcJFChKQSyHUm zLuET{z@jz|_IBuCO!hP=)c%bdQ^#SK*E(FVe=&B{OeUA_mf-yDmxZ5(RYBsyS+v-j zO6)5u@yNc_7}8)b92a~8XDa!VHuqtAbBqQ#{;foydigrEly{-@+0nxAtlx07^)c~3 zBf<1z9l`Q@IIhW%XSw&<>A&B0ILB=SETt#u2m4`|q4h&BRX>yX=dT2DTRGUT`VykU zqsbQkCve}#3SR6D#>l36-1GS?M6<)-q4^b;n_I%1dL1h zj&t8--~!zTw8>Wn?q}J;*XOIq>R*w#JJJoB&UC?+0|JolR21X}CsC^~9p-Ic0S5-p z(OaUY!8!XL{E)s414iz6KJGDkyLXb$GK%cfkSGpvWjLkbD)^Ku)2Vg*PFeRi@3VR; z*kTv~+s5ypx;L6oLMi~av^mpvIiAp{6+|K`X2Fl?U#RTQa&-56kM8Fm(H*Phg@@!y zsiV9DuANd3R-rpUzmCr@tky;sDHqhSU%~fi!*O!A0e<*u!|(fgF#F{JGITu>{ly>R z8@`Kl=ej04ASDIM{(k3~u%D^yp8wGKt2h=^EQ4vX74&M=6jWDphVC!_;jC%riRTtg zV0~`1-K&Z8toR^2^2-I)4qw34(?r2%nk%l`vxrtc)5I+zE5M&SFxCDgKU5y9&r-F=UE*PmirTWwZ_05e&!X?6irme9syw<&#`-X7S8aV6?GjA zRI75K-m7~cYQ+lDy1AKDPqO3h3#X{$g-lw}a0=xkXQBURQD)2C0nL5MuubhXB-mYr zV4n=3PmuvTGyW|ukgEs9ua+?5r#Y>Csm_HpMPa_zN%}s#hlY$W233^@FtghkGEszE zV{#VOt@DNn9WLBR**Y*EUoHq*EY98rb)kB~VH`Sni1_PSz)c5x;e;>OvCC77yWnsg zkIX)alSI65hF~%1U0Ez#F)0q`?~ldqr94x+RExyTzAjLFd>87E{ve4wyZ5KXR-(3- z=e2ejgW9Rv^mCaZ{O0+io1K+#T3RbyFAoNrf)056Um8T6Uye0iTliUD1AVhX9{xN% zz%%&@$+BbX;hUu!q&z(ae^z{@C$z+I))Z+N9VW(>wdsS5;!|?-JI|GRxfVxv+Lj#T z|L?iR11SAQ6x;X3`20`BCf4EiqK1_Iigv{~RA?k4@*za-{#!A^y z`*{x_C$$3|=8T1@?twVOL}7Ma5gibDfxfP}BzDjhH0&HP?r#9v-s~n~CWE9^^$c)4 z^W)X+x0qa9jCl!HP~&P8nl4=k*Yp)(Tu2~{+T2BGuo=qdJjcB%w{UH36q)W?08y>+ z;MBbyjZQk?lwxttb{_A^Rt$y@gMDDeVxawi0dy?OfIKH#!OMySv~kZOPN8OOc+GQ^ zTw99z9bq^eX%5Yo{4nkd&*2}Z&w0$XW8a-@S%ujzGGT`*x*vatlJ7&XXu2ib&I^W_ zohR^P)*c9cT|!+xgy8&1$wI%m(eNLiA2oLEqqJ;@M*K74wCCwSrcWj2tp82ACbt2% zr5=nE1F&6HgwtF-Ol4$KFyncS@b#O`aO<%Iqz#N>YaE1_m>Wn&d=!$_>oo{wf8o%U zCSh)1D;^%5Po!$jk<$4YP}h)$7vq$1llDR=cRCJtHDbZC;u~q&wgYOiB(XXy6gKRy zL$O!mNpHzXTFvJP4-8BPh0^QPUPF=oF^do!O-P|`VQ0xhFHPLJqYh1F711PE0R6un z;g=P?L?PoOynK8d7S_)a^fqg7-%lIR+^sb@b88XJ)V~2`n&+w19fZH1&0*q(mJ%n| zOSn&GG+Za{xTE4HHcS=@n~5JZZs$3;zL$iDqL$mI%~8RiZ7R%TRu5io=nzQ7+$B{H zhlB~Cb5QG1DBZFx5*Alhfsy)3a!A63?=78%Xsp6Jaxu89I0fT%3Iw;g_0Tf42-HgQ z!E1&TQ)sEjG5Yt=IE0`4`ACpngLBv$7LTPv;VAKKDm@|yA!Qj`>Eq{mT-D9vbkFxY zBZKn;S5#_!^i>2rznO6w1}tQRO+u zX}|P-v@5=ZQGaI%Jq|sAjWQh1{;7k7#yl_0vd;oLczFgF*sddhwo#C1nPOp z{O&pnbwV;p=XFXI9~ENx(Z47ul!2^;li|c}8L0bM2$m)<$>#KBc;nk`Xv|eXr#%W> 
z|DThvy|WPm3VOkl@A}=Ael3Vl2tX5+F8KU%5*aSFAls*H2b+q1GIg^URNF>_?Yt7& zvr^PPtndXa!^N~gpakDaMCjB{rgV<|8_;~(0BvgHId=_vPD?Z%WD*5jD5na$c3%Rw z+ZTk-M~ZTzt7YN!+wCaIyac~?MA1uX%JBQWEH`0QB{T)cf%Wb@+%wQd7Sl@*@ZOxv zve^dB%RdRs`$7cn71=~pFCTT4X9yRB8M6Ifw%V3Q7VvK8iTLxw5?HqF4~!o_2S?`g z2~RwYhKq3owugq|>x>y#sbPv67$}XizmB{foW*hs1FFOtF>Px|~t52BWUx_-S8*qt#IW#98g;&uA+=2TRT&>D75OIGFE+-lwAgdL} z6^tcjechNf%>`d`TGnmJixmfPj}(amjLuPy#&g;-_ji=e5T9dEU3)Ugyo_7 zcuR6G%A4viyVJAb^7yO5E%#r6UPvkQD}KX{d5hp|@k5*#)gUN>+bFR3iC)Y(rVI-8 zKampI0PcJ1cMNfQjZTX{fQ+0zXa7i!_q6#F&z+18YrBH<>o$DR)J?TyBj8!sD^N~~ zB&UAyxu7^PR{rl13|xN%H+$@vSlxFBa6ExA=QiNY1Z{5H{kb(rXO-W`zimZaaJ{Jm{eNttvj ze!9c6NY3(SQA-T$b+55+^*@MQk}okjDoJ!lO=PhZyeBbtA#u2%NcPKEVuO+u(_d(a zg<}um;h^uNTkQ-EzBmCj4}cWR*M-@Ik8pk7M6i#1f{Sg=5dAOmEXuGKtA@21V($R$oNpIq%5xWf=lBHj|7n*I`YY#ZdkGbUeuB!HY*>G&QV@#uQe-j!9X> zaj^j>hkGI8`5-*JYL5@BAHspU{kVCaEO%47m==FAAp44o;p56Snyu-FowfmJ(X^I( z$Fn34v{(v#1qNJ|moa2P4=%YX#D-gASkjdkGGWR_&OBr;i}+^D8tsa3ThK20DRBhu zkT78#HPawVT%Cxf*1*Fv?fBR)99?QI3Kz?7!@xd2RGLzU!Q16Iue2Me**gu4GXKB~W=pAFX*J$L+A41djI_sM*}pIJH|5tY>{i5uf1_*UB=qQF=ipoKxg5 zvXwqHQ01bdQ^7P%mFzyK%azdA>=#Eu1-$Q4#EJwc+(UEu zbPPKw$0UkQ!obr^B4ud?qF(pGMx+8Fi_YShTvN2#*o8Df2~8t-PscyL)0EMG`jQiw zigzVAgl@x8EwAwXF+)78Z^Bh}l#uCuu2jw2g!A!^ntAqaAh!K+@IB`| zS1U1_RDJYjx$0ZkBTa2i5Gq6EW<`_2GB=i`BEt^tUPMU5BR-$L3s+S(gN68Snz7r0 zHQWxOHI0(oqlc~-zAhe5cb;HdTX({)JzY?{T8!(@=|t64l{~|1fw1edo z&`2%?6kPVugS*Dkv-SCcI?siI+DkK;-s#tL+;c@v?T{bmo|=p+-wD{9iA$N8-g}&_ z^aYa7zok3NKSM?AMDn_Z&${ZDftaf{7AJMXYzI?JT@VQWZJLimtAkKO^AZfm>qF|~ zJm&A=%q%A=vodDGiel%my5&CXPZFR1_>l!;F71U+-Bz$6U3yJ9kILkoqS@(T!k#gQa7gf#I$B;TP<{Ujn$r?t<<=4S(^G~^KXb=EVOl@Q zt>1_r^2Ip$;`jFXOG9Ab*jD-^vmg3wW^&*8GsU(dg06UE0{dRSCCAR(1BZ$e*n4OJ z_1(OXyZEY%s6QKvi*)(-z#Bm~fY0?!zK{1unZU(aYVdZ*pC(Qi6w0mF;+*H3;Vao( zvTU%OpBse`!+$5a|4u4{{Fm{Zgl7{RZkP+pYvy6O^1 z36Ct~IoBtvILmiDaeCmrU>WZQ9G_#zJi|@cipncAJM|Dez4-)(p1;SxN2j2U?kaNo zuQ+#XPc6drM*Oa3B-kOFgMSR&$b}*^EZBAwPrWz})AsRusNb%{jKBBfoQ{HdwmV?n z3oATPnu&2MO2N)dNSg)vT;PUeOwA-D+h`VsAKC?*WD2k$1fi5ws z#gnIh;g|DTq)S^A?{K>KYDPKlqrE0jUyuPO-|3yf z_$bjLaMoK#%}pphwY&fw7?1yV|Pr^{mTiU>1bJ9$gkd21&9Cb+q znecLeuhU?OdMI7=eU+f--!UW-nr!{P3z%|hBOG~IPMvlsLHoChf`9vs=vo^J?R`88 z*ewYR-krnm7D=#w_jouo&J_O!=aK$c3D&Tvm3$uF1l=?H=-sdHNcyrw7#+BbUhEzO zJ*PQ*FaHsIub7LmV{V~Io+QqVPKRB5UgMjoC>$0fgIDxBx~xy0CQpyUh?>K+`tTsx zl5LL3OGk38yBVs1j-YIb5<6j$596P$0+G`zNaOP-q`&$MOgRuw{llWjx#!9BUk^j4 zE2E*~UN|P2odDkkJsdC%!UtvB$nUUH`o&QOtSd8N`NglaF7~c4;ch(5{83~tC?2HI zbJgLMeGEy_Yk`XIpW&C=8%Xh1IlYkA_u8}dw6 z$%{smT*ZdGV4Q9L4M#qHNG6UgfRJ^oa9r0JaLAdCS0Yamm$O%Se$-)7GuaBZeAniF zjlKz2#6RMq(HE#gj6E4Amqnefd?6(}S71&2LEzFVK_cc0x^FrJ8PnE-^FD8?D6Wa7 zA_vKvyJ^I^vKaGc$HR34o^7OR2MUJ|pz4?uJC&gr>@`dTG0}dQxK9E08R?*jC2Y>Fu+UP=9O$_ezF$bZqoQEoEsOeMp{K>>a}fjZg6zn@7Ujug)~GK>?n~?7*WjOL4HF zlYVV;q;{Kr&`qIwtW`OZUhPxlGH0)X+PgRH(o51nDNUBUcg{;ts=#xlH#E}C(l5!1 zsrENbJuQI$QkrurUs zK9>i&Ob#9-i3ws;9pE~DKE(W8N9vvV{pr1*#5?F2v=6NXOXYa#^!6h;7|Wb${_8#-F%halmo9r<|N0q1K(;ab&U`YXc;7xFxj%Y#{h z=SQP?cYHaC^Erwxw;zM+-AALzM-l7z$Z%Fx7o?6+G4*#>itXKw?MZCcU-z z@XyKrzEQVPXcrP{sujb4(ijX$^D924{|H(gkR%R z=#bQStUGQ+TR$0N`sJfIJ!8K8kt!+3PB;$K!T?2Q_oB~`5x4jYpA+$XESx;~16@@w z#Wn;r3EpV!0jR$Au1$$v>z_&mSBm8c?D^eDcXJi1h=gY zuvxR#vsIsCNP5y(?%<(B;S$Y3T8oM+1{VmebY^MNYHaMQN!&S|5lrP^2xqXSg=x!~nSj{mT=0u9x}fziiBxK%$Ijit6>*u85+nCF03Hhm_3(p6aeCbs16$wu&B zuLp$_rNLOZo%VGl;hvKPkfZn&yVli^by+sVKJEvf%SeFVPFunE*>_sdkqV@K1l!_} z3Vzimgu&l0;MYC2V6@d4E;aJ|5b_%S7}nz84nuC&E;lSRNf0{f>ayUomGHFE1V3Q~ z4IglVXLV+HxN0Y6UD}LugktPYP7zj={~%AygJ8>vf8bjqkET*epf~vpQHVJ}g^w-Z z@#<{6JR?rH_pVZ$_)s3^3&Ux};V>3vy@V~2dI?WrL%{ly8~f%yk-ZLap-Q`X 
z-`^ZVe17#dGe~|3!KX%YqM1V^IsPiF2oAH)n0kjikl9b{y)CG^_%ED%E{QyE3I!AC z8zkm#njmpUKN!a<jnNWd2Y|4`&f270RFoy#cDMq*;oS= zko)r(PT1M8Km8wQo@*66kC?)ajppySL)SMr$$_jr)Zb;|&-$z71ximx83U7?*tF z9e&7IkGYBU@HlWTwl6%5_ijtDlF2jBpJ#&gUg-hlL+jYYv%voQ@(}EmR z|69pDFs|nBlP6f@QD4?BTnK$OO;sP&EPe7OXvTWYfdeFa@0Pfmm(Bb};UgmR^t{JJ+Nm&9XEewO^Nn#MJ zzJb(hL`LJ0>MG!`);)kR^jZ>9{}x}*i?8=N5-!ZWD1JOoax7qD~squCr~ zV=g@SK5noLCt6MI_@mtiWc}a6`qpRoI#fuU&#j|(CKo~-*9E)z?Eg#^#L-Qop!bvy z2JYR8hgG$3pVMbFXLgY9P=`bQlToy$6at%%MubgxnShDv zQqZaqPsy`!Y}br^7?Pt3?!hrADUwSDJ6rL3VH)TksS)fsag)@gUBL!2cAThd-9z8^^P=M@B{&sf>m+@SN*JrIaYzOQOB|R-_a%ONuB(NM=Ku@SN*N zG?XHtRHTe{C{Yys?%#j#ym+4bKIghVpZB{iC$VsmOEsQWOvYoCee8|ESz9%KA9|DH z_QIU@ehPtJp!lB{h@9#IZdp8$=W@9?BZ=Z_=a$ex5dI1fMH^Q~IAF(V)mV0## zq&^x0=+E)FqVgA|5cd5i%W{o|bq@!#f?7>F;jkoa3(=&VH72CeB!$iV^{KFV_cvIt zFpt(KtC9N;9>E8x*)XJDOg{h7q*sc3#r9++5k-{Zo_hd;j!4kO4ZF~2;}*zQ%7o!F z-mtRh=a}+|vy&ZyYoX>DJK&QJNA_oeU-Dfjdb|!JcD2HY;%T_z`9drTnu$j4(^${R zda=FlOmWnvhmi3ro4l}g#u1qpaD#NN-JKyKbh{!;L!)vr$tDi;^G0K;kr?#yp0nwz zCxe93R8~Lg3Km4#gT}|t!rW^tEWRW`n*z0I#GihV>+&pN(D(+d(|_XtA;;=1DTYg3 zA4s&fCk~X==Hb`hPy^Fd#=RAukE<4=TA{eM1!4e zzB?V2GKZ%(Uj*~ORu~??nCmZGL$*8#V$F5}|FmcvKiXx=Z@kmw(~P@eq5K>CRdb!) ztX>AV*+~3OUy}NT+ybIW!8ep9}M`V*a0kGc((^wScAE;8FSb>h!-EzqIwT% zpecVv;g`zycG-_h*!~9sZ~a#ti=A~1yO}I z36km`kPmt;5Y^BD)t?G6H&lXOcyN*w?39K#+Y(Uq)>jDmQx9&E51Al;Cj(Bb!yd1l z`1{ggSifJEYQMaU6`E?1jyQmLLMpTOqE^EfwW;^)Ul59}f@E zk9&Qo*Lj8#=WMvy79TG4J`&Yl1=DX&{(*Z{9PFqvqUQyU#IPy}S{iZ}3jM?BW@kbh zPOe}BMEPy(xa*Smvw5rC4wmh<+TMbWu=BXjny5K+9Wth(P zt0UXKZa-Z!r5Ov0BgoN913+f`BYd+U1`2!Yp(te!>P~0uT+2;#nyJMf8Qo>8D=V4Q z=-=X&HaEQ6F5tVG2jk~8GF0#0Y%C~Ig=jOg2jJQC0^Fwp-`{95P>s3nx{^i#D{8ma;F{2eBc2kdh1{~ zrrM{Wc1aYPH;m+Ejssz%#cOa%$^tiwCMfjjf@49`aQyWTtky_||A-WeqI8GSOJfpY zjPU@0TXhuDW>3YSVFxffAO??QMPb-7eg+BT-;m^Fa=sY1< z{77Gy1};{BX^+Ejji0bfXi*eCtGD38_)c7|C{e|`b{L`OFM1+Q#GA(iH}6#rKO6V3 z1d$_*dQ{KuMK$8cayjZ$bQs%q$x$m)W&D06h-LlEDl|q5$a~Gn!qvfrb4^0vz3}od zNfr97moxBfh`>{P@&(He$Z>l=4Unjx#gqc{u}auy7r$N&xi>G9ZQicLj;OFjODaTI z{1OIUQzi%R#$cJ2A9K)V_hCami#u%T zEy5`?y_t-KB3zpEl+CN1gcd*SMOLhnh3&0?D=Gx<`nm@3S4#jGkLhOlWcRfVt1FSV(EqL=&`9B{u<4uqoe-dO~Ji3 z#!?>7Y=3K~zEKBWST+%HtOzn1ROtefbWum&E_~$p8Skx+BaU%`gL3|Irt_4G+j};` z_V}Tgo??g_A6!GTFi(8%+y|PakI?PuaeOw#jV-)=1#*uu_N`$Q>fZIkaLqK>+584R zuNTUeG*1pe~GrA9^yD(ed?}iN}Nv$efZXBmf6r`cXRjwd>p6>Ct3#6#+hCs z;h#c8n{ojK-kW0LJ!qXA3uEUtk^{%|Xn*W7a8?kyjv4Oc%qexyRvJP}FC~*8{TN*Q zdI%5e|Az%f$Km?hS4m@i02Y0^M81kq)e9-?<;VE&~DR-@AiMd7=tq~I1F*u4|3Mt9+s{tk%FlSa#kUXqx*0j_=*;SjBX zK;~0gJ$<+6)#x#>Rd{X&zBH%HOrqfLE)$fW`iPz7Oz@y9V?1ePmgX`fV5vE@r(4ru zvm-G2PB0nJ=nBqP5=DMf!eC1LZn!-27BlNq)71+>)nfo& z3HXEO9OW=z;(0K++YXsggK3J!HPV#k2J5G^icJnSfmtnOKNl!a%P;5HjmOc#c@tUt zH&t?LcOUHfpowX9R=8rxBxbU=3)g(E0597`xN`X^GH$^cOe|((z}sqKz6Z$aXM*j$9on@vfavaU?Ek$4cGXR$2c=uFQ5=q@1A7_WP|CzvPei!_BWSRV z6#o9&1WHFUFfzp-1CqYrma_e#?5p)K+n`N+eCZMK+0_XmsS5?zzV;WMy6!*?GsVz$ ztC)Cj5PJQg7@q%+^$tnE@vjX1hTI~7t=72fK@)^p{$qzrlklurCLzvKg}tRC z9P&JXLo2&L#?lopl!ahWcp+4#DDzXUD?med)>tVdf{W<{_$0cD+WGM?KTMkITI8cy za42L<7jQY5Zg6XA7Jjf!!s!JM8CAKC^Ok*p1)4_-?NTf7vP~YiKaa$!_C``XDw?HS zorj+?7vUTIS~gauCc=HD`f$kFx_Zzj1n2Cv2EqO%9a03EZOjFr?0vyqrBitRK4$ z-+cMb8mes3KPwi~veVdr)Gq?_Y!8+yY!QcED}k>q!7OT$1v9SnCiyN`L_>^+f>-1N z9BZyeUnrD{A2(b97voY05WdSloF|iB=^KLIIvdM2=7^t4$5m zPiEuS`qf~e7Z3LKZ-qTaJlYm&k(88ySm`|$Kg||)VcbFJYP^6=qX+~zyE5e&1oBe5 zP(^eD%fGHixw{gq^WQp_op%te#(H7j{-fBC;Dr0Eufy1FTCm$dxU<9y`|!eCbdk`Y zf_aK-_#mb%91H!&EJ%Qt(0%Z|1UKY&pqk*_+%#jW_>xUBwuSbCMBsVI(fSXXR(~RG z!9uR^(j%<>*9^OVKf(uU3iN5t7kqkL_+K@zL7(wTsC70)a5W~8s-}}DacVY%6f73J zbs=abwGVyvuXNbxyzJv_^7JhNfJ 
zo^?>sI+Ux#S77j~9he(%9s@H!vh$`={DRR!EcyKxq^8YbbG5`|xT!b!IPo@)^cq`u zzV*J?;!he>9v=^Bi8;7nu`XEk9byGlZ$wj9h{^8aFJd`$Cmc~{f<_l5AZUXLMD~}$ ze~V6{rT$?Q^>hfm?Gk*rQ^>%r;-s-A1E)VP6a@wT!8vCy6F-&Fc;>YSUfDSse#F_6 z)@S8-P{>t8t}=iUkI&F5ILQ81=z!Nj4e_6AFUje7Be85=G|69)P`J$H2@6UuW|0$} zA<91%&i`J92efUU_u|CoN8fnNuxb)`mFPo34!0LUYOfm*c=9<{mM-Qx1!J7vhxg(H9uu)%y&K!FYE4P)wsc}JM)SEPnkcuy)!cHkg;L|TI7zpF)K4QYYzeH|ml<<9w zw9|fY58J23VV!q4Tx^?%rowN$+cW`Z&Ehcik1y_?qD0+OeaPKmk+>~73%uh_z=ESH zTvF(soGX@sO)FEp3goGDtUoDEGm_wkiuI_b?Vz+X)x@QdSVGR&<@^sPolH0k9SP<;1< zt+;iZ><_3x&+S9_o4^#ZX;v=_SU(rkBinG}CI?Vfkj9X=viPOO6}SHv3H6=1B(`KK zyb$J9->qwaWe8^g&#|Z=eT2A|9ue(-_DdXASi{_>HHl>2HWSq~2K;ErN0I%fIGp^6 z;;T#9WMoee7{0p4(x^H#sA%F(2?;z``h*-$J&UKly~Q;>q2TM@#grEw$BEx_aOui{ zcrrTJu6WWHqNhKA4%2Ff<|cDu+F=S$Q|eKxvkTiprswB>2_b*8CFrcK?d)Q(0d8!r zWNW{U6lOD*@y$aE;u;m3iMtMBUt4?*vo{%lP{FG&R{!8STzyEL(8$k*%y!PxrL{;y1}~B`jDiM z!}>lXV*HiEuxPxl=+lVX_;#r=8Poq6XL{a&-ZH_r>?;N{q2pDi)dh0bTtg8D(He~q}WNQsn;IU+c3Ua>0{9$=a52N@3$C$(mhjh6@Vok}wN z!bEfM9eRf;Ctrj1&kF4NB4=82?5|x)^E2i#SC>R9MC0$yl_b()zi9o-UwAbtnoS#g z7F$xxv2{$1(6h5F?9e^MbjGIR&ZxDlKTe;V(B7T0ySgU6| zlSz0$oHos&Y1;oim zD*jq6iU$D+#mYwOi>m=gLTw?St4u_pp=aMpkxS(5= zdE}lWx_fp&u>Dh*ddrwMnHAvV^QjmaDC`veEQg>O0QMnQ;E>rN?0A+&{HrFB+?E;) z{MCjw{|>TdcLS1kNQ$>)R+Gu+UXhK<>T!F;J?4<=&R?9%K);Gpq{YvYvVD=<^8~}; zEg$iQrZm%;pvG?+5XU_t{k0#gUqnpA%dhf+y_s9hR}<1$Zs0L^A9vdiEB;45@D@^m4?jhn~Vn*>OC^ zyqS&NzDg|7r%NXb?i+)1LCkl-Sx8D%!rXg#P5XJndS-M58AakDgP4s_s0uGK^R zIBUMcFbQTtC(X@8-`J^Cyn%`O0BDCuJd8wE6OC1FGSp%6PdHPa3`1wU#7TbxanTk@*m2Gi%*(Gr7D+N^k4(tg%3zy=5h*oYl24hbR6pfz%$?}^Z&*mc5{!|1HvJEnit$_9R@}j1T z;jsSgIXJEpEvj)l$ac^d*c>Pgq6~K~DPD$&IEC+!`46``+ClIBDw1|wo+lOFL#6w9 zn7OvGz@fYz6;4j#bGKVk?c;)HZc75b4E!W8mp_8UT35POqJbTeR-ux76ka_bjfxcl z%XGw6B0WqFefox?^gKIEQk=||Op+i4Mh79F=mFDyo`nkkjl(@f58;ke1^li*g>t$N zv3l)e*n2J?9}8Lhg9r5KvjkO}g=apn)6V^{tYbO43LSE*CGRoYYZTV%xWZ$zI#87p zdNpCW6c03^#=>F(XvzsTsiK8p73eV1sT6{w4Nf@#6KHe728lC3H z`Ogi;e8|==*s5NNR$RIb{uLgekst|25(`;+$UztuzEfPJRE=^gX2IR)M8Fg!8k3a> znX(=zd-oHbsgb97PaN&SMufAWNsoxFL>E|%)u%NFl^|^YY`h#B0j#GNyC?j`S_t%KY=wbMRzvGTgfDMOr6aVLJDACApBL?L{TqlHk94gZcYekLlBgzv2EZ26tcS^A@!W zf_pTNUW?m|Ucc)xt=f(I3T%|iTf^~!p)pZOpU&LoB|?9}B$(e+glpw%A+2LJioZz~ z{2M$UN9df#VeoEQ(Uf=;7moFx75v<(ziCBb5)Y=RNO9wX_zNN#ZvSrhRZ zl$WNWJlVwxW$uu_cjS0#@G)lORs~rLmxARzg0lF=PHNIs@nGM}#KHY7v)Z6(xA=)B zwV!51F057NaRN87d)YYHDfbZnK72yHL^OeKupw4Gu422&+C}kemxwmyY2wU>eZ=~Q z7HL~=%7Q#|aQ*@h*6s2dmMvMqR6nTj)IU*Np-q>1iqD9l{RqF@_Lkqc(uwu4Yrxv= z5|~Dw<6WDw&@fkmpHHfVY(onkTRs&>nf?LgrY(4`|Vr7^cjB=GC(G4SDhw>Eqbjv}=vav;n786+g_8eY+)<~2N-yq|hhEwZSZR#;V zmT6eO7De>-L+p;F=y>=&Ub^ZBSrUR1t}F;EUf9uNwv*6vi-=xWUri(bsM1$CzsZ9c zYP?T81M@$6iEZ9h;fz1;q3sjF)Y3y-d6yoKcYKQ9KI-rQ_2vAwSs1MPCKgGV)d>8j zM{MZ0MX3HR12?uGhtwJqI$V7(i&4uZ#y$>c<{~G~|NRl|oGmd>V5SeCi36XC(4c_i1Sla)w5BC$F1 zaen+Ru({I=@tqf0q`M>?o^C6SF+T<8&S}xZ<}Cux>IBU?xq+q}4T6=z+4i!z7jX&P z$nS2P##KS*W}kY`FSzf9&O6Wo5yUdsT_<>pGH$gjixcbjzQkp z&!`i!8ADoqFwfeszwloxK$7&Z}K)!Tw|s1-Hw zdstW*E=~7V#-siKbN>5t1X=f@0w1gUu|YbP*!$-rx~J!(`H|oFHu(~IIGw_qi-JkX zH$7rm9D{8O67lSR7a6bi2dexC3%oB24z(&c@W%l1vQz~o=@j_+Z$CTiIS|fG8O^PdVvHKG=+t;9n9>VqMm0Ec{Q0hx{cxpmP`_uk5(On(;WbGMkO}uz>`Lx%lyg zHTs_FBU|cbQkQ)~Uh+{t2$%zi*b-0fdl%tDOo0=p2XVh;X;eSkjq8}K#+!2``LeoM ze9)W}8r5WnCx2uMdqNlfTst1+7k`5}=lz-Ntat3r5X#SO?!!CV|6A-BjBKrDYy+^BVu5*q%h;KynvtoUL{dNm$dxCQH-e6<8H!pVA}jp zws2KENqa2h@|XR`Ur4C)kP)pottJXa41bOHep+Jw6Gu|Nq>_aT446B|ALDg8kh=G0 zv;1L#vs>Xa=|8JZ_Qj~f{;8+oZ$TodyEuS$_ZZTIyDs?TZMZ2MiKEqga#!R!-_{ z$gDuPBkW03?PGDk>|Ua591nYc+MwR;y>Q{rb(X9hB0glzK+{zZ{NtwZH;q4G>)zea zJ5QN<$6g|%dUa^(To*zF*OH0k1z73UOG4@`*hgP89Bwj>%s=>ojW|+3&c9Nie>O#u 
z<)-h!a>OAbzB8Dwo4X$M$$kh99LL={b?Cp#3;9%I5x1$2f}plaXb2rI-sI)PpHA?? zRhuo)GAUYoC9oQorpD4giaS7l*KItmmCm$`9^=>5UtyWA3H>`-U_*_YMw>U~F>9Hr zP_*HfNYPD(ES_-~bHAkH($kY+(XOeewT8o1uU65aq)OZs|Bxtek0uU-z2RHuQ&Oa< zi946Xh$ik%$6~u5Y@>80p4zR8?6j0fbwCTu8R7>83vw`P&qr})WCX}Z#)`gfzKSNk zpUG@+!O0HCNTXc?Ty9sVC0>O9FZGCZU0a#Kg#tj;el+k7Kv!z=f%f+|_P8?VhoZ=6&2KQciG3 zc^v?sS|QhE7Yk|z6yikfkC)YdnmOZ@(nNi>cIrfG&G-^O)TxL z>Cp2xM9QFxt4>D4j=lplQ{oqN!gT1fJ&Ll~gSf0(7am{%I1;ZgmvuV4r{WPlY1gAQ z7SEZ}E;DK_yknCpUb5lw65O&0ynEAm zI`OnIZ91Y2Y0C!L4A5LJc#DGA!^I=$-ryP*li7#=8j8^IKoqp3q{2$GK=5BP6<#+S zXF)3`(z#EM!LFO;eDton*thgJN!Tt8qkg-9n$KjqKr{m`NXFru(E|j&xEg9n-4h%a z1NnAu4SvQzg`N_2uNU{*b2npKFqmA6TitJ>lGh2`)w_V}Ij_WVW^?h)5Kq3`G6{QW z1C|=B=Ck4y`9;@wY^bS4$H)OZ*?Vi@5+x^?_AUT()Q;e`^nQ#~mB)Rp@>KfPRahOg z1me{e70zAo8dU@I>0GagwDCh4RFn#t(tm4ke0UWb<7x737Q7lM2K1_b zD;#kMhmk|Ig?y3(eI45(4r&?3NByxv)s`SOFSAgr_h1~V{P-Xan(12TD9lNhOAMx$ zrBAXqqnE*^%ne|z@RJO>_0#r#fE6kFy9Hu3r1|?%!fc_zjFr9X5I0OJLHY49%&k`& z8kXG>uNt!!cQ~wvjzA^1{*8Dj$CkhS_zk1}$%ESMZsOZj!kY6G zxF}qoXIx9eGlBx)-xY1VJlh^8w40)b=MIw6Y2bj7HMk9Sa(Qw63 zaX_sWm#r9r6AWIEPYZ<3t+o}>a4bYa*C|YUksn?jI2PYdN@OWjne2D*CJ5@Xz{S%9 z4?$=QOG`Qnt`90v;*dPNbCQHJRYG2MO9%}1amI#&F)-+g6z*8Egw@|uqt_4LgHy-s zsO#KTFjV!1NeK=(_RJq*x$FgT9C8~}E)5~Q$Da!M5Cy8!aS#_YMw5W!4EA=}qREd7 z__Jals6DV0yma@VCvhTdxz>V51eS)a^BE>wFF{E^o+Q^^Wa8F6B!8(CzW;R}>yy)P z^EL~-d}s`}ld*t#rob*#{3Z`gZm_AEeW+V^jFPcy=miaf$owZM`D4zOYP`ekzA>orPZeyc?O}JLKQ4|9gI?Dm+^@P3 zHdv&ht=UcDXulkKEz9tYUH}-Iy#hK`j%Ss$lQAzo!2Z@XOxjs^*2i>;V6h~1yKX`! zgbv2=7<0a~HWhk+(rm)l?9|P~KAi8mn*ntzjv7q!7nYz9Z4I(UI%U>fLK1Ks?tQD+I zrUkI7%?UOo9>!r^_aOIlq-e>jL3ByeWZGo?1>fxXjQSbVxZ&F#rhLwg`%fE0o3adO ztT2UOb}T)V?Hc>uFNHtEF{*tFCjz12YP!d;o_&k zXqq_&3olFY+S+ow5&MCy6wA=O>$*5X&KY)Yvl15#+JXBoOk=-Vq`6+5HO94gi!Szz z=Q6aAk=F472ie>%D`Em>*%sjlt4fTW{gjxDxrqywcruOqvqc7n@)<7#jqbSrs5D-zABRD8iCA2p0JE8Dm(}$QB67h7(0FbJ*|>BmSun7Q z`B|F~>qo!9<@GhuAi-Iv7}mu0Z79co4?V!Bu>*2~^H@#NMRMev3uO1tN3qcRP?i0_ zmaFx!|GMqytFSz99(ETE#Fo(3Dux?^w`)P3ER9WkN>(ebh7mm@iPXj#*04ht)~lXI zgVR^okJd{@1N^w9u%g>H!9`e(7yGe zQ_F+s4Wj}+X|yS~`7a;skK>F(a=6Vo2TO|@!1!hjd@G*99~&&? 
zqaN0Te#hwuIR}08<9l9Vb3r`r&VG14;U|gnhjBUF+7sFIk`BQ0woJ=}@y~4d z32#}Np)5uHOsuKSooum#l?x^qIiXJA87NO%$O1Yo_)x`#y!*^Rx;L~0K8-M^cUQP$ zS>|9K@c9vJ=o>(7=0s5C<7ROFWsX>C(H-`9Vi`nzI|#efW!W+>6&fo@XA_Lo}~zP zjeFqEw{ZH>ydU0d8AaZ;FNN)IVlY1KFS`D#LbGfKp5eToTPgg3M2Tx)wP-q&Ubz9o z={Hh7$_wLHwP4=jL-c}U95YbN!3k5W;Y9Q^JW?2fuV!|__u*YoxlNcSu1bdR!~a04 zbpW;fHA_=w>{HTs9nXY!}rSlz1-_%&)jRy_E?jNOjmlU=@8H!=p^o`0D4a@J(n z6J7^B-OZ%uh60cBh=X-1|5&&FbJ6QwO>AGZ4i^tp19`vwY?OpFKka;56eIY^9cNx< zr*-_X`r#ImB04B!gf+>^4N6?sPJxb+n}vzIg^V4e49O`LNcA+i;sJeuk(Gk&GQ+q^ zP9~-}m!QW-S-LN+6f{>(ghO4<;_9FGS^xfFf~RL5Jg`zCA19o|v)SH-As%-@R6d4o z6P$sPkvq6T{vU3kx0X*96_Mea|6)m15YOBE0?)q~NSpPCQ@747@Y-bwAnU?J8*l3J zrvy@V{SY0WR{=}3mhy)N!*GyCIec^sp{<)b!M;YFxBZL2Wnnt}+GAUuc+Us)?#&Y2 zY(L0u>Nc@wXB7AjmpI|xmjz||{b2a=G03`m(RWh$LZ*2G^_V;#3{xU7TXzJSrj4dw zOh?eci+@1Iv$Nphq)S_!M~fs61YxnH{5nTM$Bt#qnA$ew8(3J`t!F^l@^#X--&3v_T&aJV90n*@>3tsW{JR8G45u7VUfW8QyivQcsu3 zRORvs2z0v(R>$3FkjDm^;TJ=0)CyTx|9OyD5=Xt06RE$(Mry3(Nb}@Z(v|mH$O^24 z$IHv1J8&StwQ}5a`vN3N--S9m8Qx-h8n>Bw;B2)LwlZ}AJe+OMTxajY4BKd2+Z9Ph zmZ{L2{=Ook%z->e>j9)zyU@LIF;v-Z0k!aZ2Y=p=rUtMEA3ZST$26w!l|t`P>=gxb zJ019g=fimdyk`C{9r)lf!G|znBu*PygkB5m;iN$#h$Q6M>Nx^`YwBhcC27H;)@qm- zrXX;*y5V6<5?lXs3QvC6$BxAAg!U)Laq;p3hgPs&RfvX4z^9Y zsZP&-KMncp4XhhL#ebUu3kU0rpeNssrCJLwu_+1lVymwq{H2v1KWSf$DKgGjJHaK-O0*>Q2kX3 zHfD`wFNIn2yzE!-;N1u4zdcM~2c9jg9wa6HKGTW%&#{DQZfDTy{B!o@+I#%jbDr%8 z>4jMm7Wk+3f#}7n@Ax8hrg)gL6Akd6k6Z73fSAf)lG@zNCNwM;e`wsz?8o@PW*aN~ zaa5OgJ0+6Fq!8@o(^95E%0r1Jp{N$8J<+I>4>^XSr+2SS< zUhQH-mx={8q+~Hq-jOWZm>OnUi{#o@OeL{!;M+lWLDQCY#%#8@YpuM zk*5P`Qcn*2*r!C(MkQj!fRp&Cv{GQVorciSZtR3?y1+S);k5;Z{AaWg2?~?q+B=4V z&!#r^c#15H9DD+D+LJ+APYstoz9AlKPy%%jjitBVG4Gq3VK1C4(AfV8{w+Dg9()5j zX5=CGci4_k?khmojx%`s^I;gDya#(%S@8SP=4`!HpLn&y5Ihmkgz-CDnEFLoI#}9? zI@xj;GwyZW#mR#Er&Wz%|!d)Lp`n)$1K3B$o<)CJ=wbuj=yX8aBPamSK z^Bz??F5|8CDsjw|%VgtjMQ(HB4Y9h>2-Y442|V8h7Z-kj@M~ERCGfHi?TN;PryKG1 zNGVt?^r%-gE(7gHLT7cpE6NV zGwF{GVU8FmAWZgWkSeQU_%SyFJKk=C&gQLP`|dq>OHlfH<_b7rx`yh^vw~ZxLr7@9 zh_@f#&WkMUdFGlAu$gRTKaNb~Newz&OWg?|CracIlSw2z>ex3AQ`)pH0mhbPl0%;j zAl~{Kar)>5)faOi{Imh(&t8%ZhMM%e#0ffU%W!;eZpYcG`P^Z;n0v(EK0|KQEkrIR)2z6VY+0mm&A*bgDo0 zJ?z~X$BuceMR$pVc>7WU_I*BzH)1St+;9~hB^t@Y*8x|M)1h+9htkPHXX}l2DwL+W zP%DuwwJ|=9U(ZhC-M3vK#>xdkH{QgGqC+rbIET{D`cQLEf`5BmiJ~#i`1bKB81+M+ z_V3WAe!ny!$s^q`{(uE8Y78qly&L4`*p#PF>Z-mEd< z_g|dB1D`t3)3t=Hb}2!kdYx_R3&fHCRiNX;r+CoG0Q?4=2EA1Zv{Knr-~yXdac?*@ zIi0~JAGHJ?S2GFjEg|Pi6)}8mBayiz#luJJXOb^h;;wBuf{SDuJ7PPVtAtZt^hKH1 z^txj9t5M)p8c!5#&4j$hWK63q0~Hx1VtYwXB-3;NclPJ9RpMi;!+AEFy3PkCwdnHP z*4yGd;kWnu&U9j6F&a!9g|qC$k?7I37M6Qk0i3yySI*sskp4Zm(S8^H8}5V}u^UjS zum}CZUx}7yc8EGwrLx?wVOShyjEjn^#i#n+1n$r=$d$f>afejUZj~}`s8oZyLo)Hz z$1BXCO`Vn8-$tsZ*5i!-=b`t7VYk0Oe0IoY<>Jk-B-RHb1TT)X!dP^jBhNGU>cZ@Q zQlj}&l-Pz@k8$p$31Iwd0;;?V#~CZHiT$VevhRDBVWj6VJUh2f@M>%$uV0B^?SnaZ z;kG;;lWqXhWo3}`Xg4d3(cpTYz7@u6-i4Ec2Jlu*d120Y3%^-);Hxr4$e%S0^akdO zSKT+q{0lCkg!dL`Rwm8g`4!_%$xB4bRUQ8Z(p5K9$rJqeQgHraeasvrmKLAS1_yUO<}$zFGTJOl5xDiM_3eh4EMi3Lo&|Q zv6+*G!nB7Lg7-ZUm)EqRYMGcl)?wsK+$h#OZXcB81mM`5W~i;xrcW=uV{>e!kqjO} z>km5=-YY5tn`auhSGX(L;2}2j%`uen62mmFE08mNKVH&%j@J$iCYB1;IHALltWj)Y z{fi<*({AoVzGfKqmwG_Iu^lQ6|47z3#zLcT@9_C=H{Oxjgh`LnalOwb6s;LVuU>pe zI&{VGS6Pbp%`t($M~(P=ozZOh5(kusc0>EC;TX0~hjbm;O#BCZ7G~DUbV|Bgfpc9C zI0q#`VEaE7@$a!XcK#Zcb=(*h{da+=Pq$&uCGXaaQ zaD`jKy)5t<>b&11w!UgYMPp{*>W5y1XErC}kAaskxy_43Pq>Q>^CHlA=U0JII+vMU zQ^4iIcXoF`KISek#`nt=XhORqIccOpw=7DA{#W&IR_NLo{xgG99aiFjLn?5&_7Cu% zZ~(7A&4=fTgTeDoB7T3a#yjPA;rApNyNdydIPB=&a_LjIv;SXu%Iqa;nG8q5Z&YC@K zfR7ms%4amm)KCj}^g;peZ=8YGbYBbSqO(w#gKT&GNp$d3FOsnH) z<8FdsYZYLjb1Le`OvRVNZs_&?2A1x&3cuYSRCvd4A4XgsD7cbNK+E@bHq%IjTf7xm 
z#*5VHgBQhMRUQw4DMpZ8W&uX3MnbmXC3*TsggL@SCEKfEfOZ4Z z+?@|DY6Y-2&>X8WG`Z1}Ha1pGaK5Xohdbl51Sfhee7Gw~GH0B`Zv{)pn6ph{yLGal z?y?SJ=d07Nis@)*IfQ@LiGz?Cp@sJr48;$@Cvd;tZk!xc4W*9_af)qdp~C$h%r1&x zeETWfBaX(~zrTrIHoXIlX~Miv*l&!;RiMjGw3Acr|Dbs06x_H-@UO4Ugr>jtn4TxW zmP|<%s}`MSlAkV+oYh7&)v8oHYw2@1WFG_i23oKTzQW|lx3FV|92zZ6LiNertiHYm zR6R48#f3rS1^*_V*_MkxzB7^;FXYsPjH9l@HXNz(n?)?oAZluL5PGH^<*t4p8u#>R za^q9mZA**MUq+GL=n}X&(|=)|Nfj1%ZN_0`Cm?Tp1^z6NL6a?67=K-jiJG+eKl1_f zwsxvr*R+Xrd3Pj<+w8=LJCA}Rr+b*>he#xDPGZOR+PvcZCb-&Dhb@_suynb=$GE3tvIY@3sCQQqAMD@sPj^tFBcCa zKmJn&rgE25+loaieougj5ud@)e+>QJlaI?+1hH7}BP?0SLfn3nhut@0*e}5!>+(CD z1bjb>x0DE)2$`K5_u9nN=p)Q|`?^p;07aa#QYE$F25jH^`{FFY8>=&N5$3<1EZ(eu z0`KiB>lq#f^hg@WCwjuN(oWoVxPY|e_o3`EAJp1Oh38Qkxtiw(#R`7Vet9RZX?4au zxz2D_YZadBo6as7Ze+)Eg^q?}Iu1H}3a5I`X1Y3a@Jnhi?mAHdH%~NxnW7Rc3(+K3 z?-f8!Hz+oQK2 z(RUeimTHKvMD!DJZ6XA?FJQSdgjv~wEb=(V3K!{{^6oxawl~U@PJFVNt@UWZ-n6GM z>%T$px8oLDlAi-FI`0YR;2;>k(Fa!g|00%)gJ4RTu>Z0)#GIX4%<#reA@=YGtX0e~ zW5+XcLq?6d*1jwBT{szTxPAwln@2E4)|hteEGA|Go%dN+9*p-k!TixB==eAYvj(js zGLxm4T*Z23`1L8N_e_I^Q7YaE8F<-n;xf+n^kV7orqkYf%o!KAFLGOTEGUN%m;p zoX_Onc%#>yND{GQF}e9+zj(dCf>5f-XB!VxgPe^4J5gJOBfF-v1iknt9L`p<;< zZ_0g;ktjr84+8o%$6=_!6U@ijz_vOBqfIZ-BfC1_gvVC~Vv>N3@ z_MprmU-Y{=LU(c*%>VY)V(2jqu=7XMaZqhKs1`DcRi4I5sG!4~@TCjsAyChWfj zA$TkC9LOxV0mFYp88hif(pR1Z)2FZXWR>Wqvn z_YU0W0@1VAg3dDg>it_qnAN8Bu>Zg%lrcDg8R8O*mAElVwx2~=&P8>hg@B}551i8z zfp3|YaC~+Zx>#&P{;e*a-ibgy5YEpLx)dY2)QOFt6=P)6LD^Fu=;pl}aRBy^uv0(4 zC|wQaPa#lc_Zm%=Qs}IRqGe7aP!%48>H?{x zN%#x>YVU;)ES14>k}F2~gb?|BGko~{K5T>v-UW3HHt1hGTw7F0?zl{VgXIRe&-)h{ z3fhEoqg2_o)f+Le`2)QtEI}*uY9LB_1rAD!(@FYOu+!8YA2s!WeS$R!Q!eH!9xI`{ z1iQd&q6QJP&W7lz(u_irAp5o;4$_{z#DvRJn0TQaoXUA$9)6T!^%q6)v(Mfq=k@dO zk46CT*n9yW3dMl9VFSLrTZg+xf{6E(C3vH{0>8-!F;goIQ83{UiNA9cO41(EYOXUG zm)2;p^-msn*j@nHQI3uH?h0MsdK-U*xq-mec4{{y$cSoO#1#*9;HdQn*z_YBrs#xV zu<1n7z0w25pUwo`NB1C?yRE+Y8-U8&D=@(HE}50E1ky7kAu{$V@Q0t%u{*2a{Qd{z z;JI;fB0-5gH&Kl1u0(@R@?)$CiNz?+-R>GJ$KG3b5vqhgP#Fa`E)RbQqUN2f{_{_X zcjjFgHy1miPHHV^?AuIKxPIQe@BP(g4~2>PMHy~aAlfoIMp@uXM8L6nTb%+{_(T(@1TtRTx9_ zGXAwn5qhvofcbT~hiEPvL8fpNzwY@+{%lN#JQZt_oKQgSiw;9%l_^yb7=&u^-8egX z0feW?(+M|k;9id;Y<{Ljvf8IXe*jCGFB7tRXaS@Z+{9?a$aZGv< zyb@UiA1Ah>>7!C?j&i{@NnY@u;zamr`IMeeISX6<;?N?XK$dS)?Qa?Tkycm(6y*?aLWZWG*6H|U`p~it$Oqw_WO*&%u z4+rnz)eXs{T|5?J^rpe^sYB%bIdiNqEWl>bIr#1_*JU}LiWviQVEaHEtk-MiIrqQD zh#kFT?mtm>q=MsJr*i#ps~yBGstA+X1@VydQq<3%hOgVk;bU1M`MBgYf3UNfgm0Q) zanv9SJIlS`NYZu}*pku; zb9*U`%}=Zei~UB7zDz);bXiaxI7%(o2Y`y-Wnz?kl0Q)K2K-dCVDepUoa1-T;%fYL znCDS%K78Mf?mu}MV>%|Ybsr?yl^(q$YHAP)uYCaPN?L)Ge!-}J{uTHQNJHPBalCrBN1QA9AuW46l=AG%g<^ zPkzKe&a6RPb^9uIm)PKxA5V~Fs0WY(!1uuHsfcc zyeke%0%Sp*2sGJ?42Gl|EkM#kg7F z;c7`<>huP3Yqb)!J^mGKR5gk3%L8~mQ3E&kW?|agStuh@1#un;u;xAY^K4m9m|%ab zds9pN1^@EDG`OPX+6c@|lBa9W>w==zPyDuP0jy;{!_j5+P;Svrl?p`3x$kE9{m5yk zjMHW&<;VNvpSh&c z_c=r^YouwHV{w~9Gu@irOosK8*sAGw;8fWm+-|pm1}->6Y(092X)6y;6-CuZJ?et2 z`y8YE^kAdBg8!5*zR6h zdXS{rNp<}CDbeD5D%FxOWFQ0O%{y^Om1C?8iQ&!OFqpA3#v)0g34#XV>22L>7GGa- zEd3$}k|bSBn$JGQuahHq6Mj78)ve(ZWrr#B(qDC)7+?d5>&)?+|1ZLkwaCNCO3W!P z6K8rblGub2x z3fF(>nKG3;{B6eFK5pQ&kys3z5WzQCz-8E%9e`5m0H<#V0w#+v?!5x6k7Op^<2;m6 zSG3rxFLJTyoi5yQxJ<-f=Hfr0o%o8IN9<~CXnAWq+?u%tR&ac)?C=Mqb!#!$#$Sd; zg}r3vj2^z>^I((=7G_%H`uV#jEkqN^Rj8h-%zU-HRNXh4jYCtn@)WjO;fk6#jJi2S zO(&O<>ehPtZly3Tiukb>{tieb73I><%C#KhmQTcllg!&7y7pC38>~mpg z(Q$(>UGVD!|@1%*TeB7`W;=g+03Z9a*Oe^I3py5Z2_;6c4F|nk}MFJiSF9l@%$+ zunic(AEGSzJNW|I@14XZ8c4HSc9`Mkrdr&tc^|5sQ|Z#a_qeBHkYn#DvuwKo>AKa4 z?_c!d@AfX7_@f(rzFvg4`6)PjeLFu-#1KA5J;XgqzhPmeJO-d4gd04E8+W)q#Tj{a zjg=v&U!F^!zYB&Osd;cQT@T&~EC5N-LMoJYk=Q2Q27e|Op 
z4VEm=!K_$KhWBX(v%yc7S()3)dv&Q4M#gWDMtx28Sx7SVtP4bQojv$Awi0D*FZ06l zqPQHs9{JDk7Or;OLN>j&M|#ALogbe8C+shRyjTq!?2~7neE3Fvw>;S zpvxe0XB+;6&OvdLs+_^ z1pkwrO-xQibKN}zZAmq9I$RGPj#>lFeamy;-p{*Db=jBqgjnm52J|`m9~`_)K<4mc zeqY=wER%mkOlq|G*_l=tVP1^i3^M2^#ip9*^B&+)w*zCs)Vc2HAhbR)V;3II#4SPd zQT=Ks9^ty05-V9cE8`c~9o1v{G```T-)3wf*TYlWf0TKACYsrqnTlbCU9_)tI+L*} znx;sE!b-Y>#CpoIrJM&Nw_Jj`=c)tSWX3T`K%6ZsmSHANpMq^hR*d*(Z%EV6#fuiI zY{kN#*q?A2{@l&SHT8PzRvZVvNC7lVE+&(vPR9E9N^I?sVHO;{4^y*Rc@p^}q$Y!V zqrZ12E5dAdt9=S{}=z4srUm@=1%vUP-)$D<+e>}qDn!kE3t z-GJYkocyZo*=I_?iI4OS+EdveU0a?U<{e$y1OuLKmAM$9toD@4? zW5GTfH$(G1%CNuS8kUX#o3?EnvzU_@*moA6GBb$cRtKKst>2`>GLCP>F}a_a*x-+? zN>H@01uyz!;lBgZNQ!O;UW`;@<}OlWHh3ySRLn29&1HEX?^1-ze{2|+cwaVDSeu=D z@GBN3J*EeT4Zv?k5H-8r58X5NF!qrZ&}|*a-MNNg&;AKeTcO18tnLzc!RPRa20+6-Q|e-%{v zpMq}hjwYsF1q`e7li|FRrBGUj6MTIXM(A-^=Fj(vrisY9_dFkr1XF z;rfRTMYubm656%n>1TT_YkOdo|*8C^74O?Q2suIUm3&4$L z<`7(R9k*LZv(~Y7RA7k1kd|H9lIsjo~h4q{L**Wa7Gk%EMd_k;R~cS{f7G- z6UxCNhQGT)j;_g0C5!hegNQ>mXq&hYQ;vDDYA~A2-cbN%Np(m}OfllsC3K9w416P5 z-o17`&^Zx^y?r<6^#i3~docno>U^VzBdSP9s3f`-_2LY-ZjvN-4*y$-AiRpZH53Ul z_gz|``$ZP_xh=(dym`aVlU8Ni^(%1x!gr+BXCt0-+eQvfHm>Oy6=q&2y5c^CbnJP3 z9sJeGASqE4%@^E+kBVj%q5e1VVnr>tQ`m&9{x@*vr-KBI7T^g#d-yxt%eyMU@@w~b z;tK92obuKY3g%LrJ2l~&{(Mzf_9D|n27PJYI zisiUJ45zX>HK=eo2|}j7$0p7Z_@7WCx;vht|1J3sGNr@N!^?}dj0uA4`iXe|#4s^x zKY;EB(m|lk2Cdtp;n=k*Qayu$>+Z|+)suzz;I0~^Rpf(Kmps!eT0j!gUSjG^Rs75& zNVQpz2=;*29h@`jNjtgV%m>!u59Ds^hL|@FpfI7pqTci}$NLq8ip*8$!_88jyUwDD zXEDq(5nvNsb%FP-goJhb;OvAVGcmx^G@=cnkK zEJbg79mXG*gxGbKk7(itmOnGG67PJzPOquTlKKWCbUH3iMYIzj)KUhot`NfhBNb#z zUpFWzoCU?nBVsHCIE02m2n=7wr>5vr-aP%eS@oR{0TOtX+ zX~Ek!cM>L?EW=~pa{%Wjz=9}ocD_t7netkjd3W3f%B#k4Az8$;P?BJcdt~AM*@=uD zW5HW=TpAU`FHzIwWsnl#i1rTt<_1MuK+eUVKYcon9{DpBa$9;~ge<{Bw_|D3@pxPl zEC<)z18J_SBSvp@24;lgjGoP=4+WLss$C3zaz71l~sn~baC#F;o1G3Itc zEpLOSk;V3qGBS8%2^?(yN4HgoGwT{Cj@wt$X%BU2Q+6cEme@hVM~Zu2P}(lY&Azh? zQRUYVaXU~&$if~vy8zTC9Y9=TPaClr7GT9{_1{*?Q zH<$PQuWb^T_$K0dLj(9#^%mDu)x+0=a+o~%6&(>?hH*y6A^+_qxY)0P$99QR+04Ua zb4w%SbBwoh?Z@b=Di-7X98f_kj^mGP1b$ZujPVNiKM!%=>!xpL>N@UDdh;~48{CHT zHJ|wU{~1%O#CzzLK9jyIF2`wseb5vaMt11>@cuq2f;AuIn9pDBXnX*lG-P{*0{x7%t)dTcJU_>eMM?J%QST{B?f9&fCDFAE}KA;e?nK{W7IfT1&Qd6!gKUxGX_xa-5fSLS&F&7mhBP8kxkJxn83RDrY&S;0to$VKKBFpbN0jBb}; zxU&#sW-VX~w^81**cQ5LXA6n@>_XxmNAcGN%VC?+GrBe2t<5AGCV}fUAm!NA^AV$QNV$pn8 z(BHd+IyKJW7{!G|XY7p7@mZ=T_p=S?&1U#1m4>rjY6ub6ih85-pEVj5f=M z3BrotyC?^{IbWRW3jwxTFbl2Iu4A3}4*FlA7hN_K3wdoSOxfrxhP^k9xpKM!uDtNU zBj1&o;&1sdb~l4OKe!ac-WY?K{zDW}4@8e8d)a(NAAZ=@+c+~_n)4dvVNLrP?BzYD z)~6=3lRLSN$;mssJCUg{KEa0x=y=P_smoxxe6-0Gv#liYdNFv45gszL7XdNm6?CYKAIw-&z>Nwy&9UZ#2H3E)mRKkTAMU46&&sf^s1bx?R z2tUTT1lN?}YSrcV>TN6t>pn(>+28miSOWS&JE{HyWpZ_JB3rqqmaM!yk=06BK$mZ} zgTKG^QRq@8WX$5!#z$1x1tL%2f^!skq`d$miuYqe5!XQ-5@jm)eunWGn!wlXfbLgI z@N2Lw&D?kmI{s<0pOOz?ujOKny>H2!64=IY-!yEm{4%ybN&prmc9A=UvUur*2yR(- zk=hFu!=>Db6c}ap3#8@5I-FkD#+Q9^77toGVd2#%^pY9FXZx2j&s$_zbE!wP#d8k)N_1oV z=P6j!B^L6%v<~x!{Wqe|P6hUyY&Z%vsj}bx{KMk0No?8J5Ck1GX9PU>XeH%JY?ir(L2r1>8ZJZlX8%E4RWpnAuoA#b zXKm)i)=f;r&O*k=N|Z_8q|Iuzg%~4 zXM_OzV~tx)WlIDzWz!tyiv4n?rECN|=1c_rZ6~1l&tlk>5R272^Kg>bO4M6Bmp|{U zE7r)Hur-~L*ghyn-gxV%?RgCTgx#X%fYq6fVnZ1%jmow!iWe9_Mm+`Px;19 zAhlP~gY%KS`;-G$X7-WMF;Bca>z+mStXc4Q9VJ3sXR2Sc0ft6ilZ9eh%;fH2s8+i} z8Ko_VI?JfSK52Teqmit#tHu6*w`jJ3EaU1Wz@7+S!N#}T=9oAeF!zlJd)3Vz5}YZR ze2IoR0k`#RD*0XwLGx4DhK&2=`vK!wg<52$n1cmqay8u{@71tIUbWa?YK1 zS_w*gWI>FIlZ%t?L(v?L&$77{PPCQK@hNhc_elk}sR%Mv^F!foOC84a7ttEuY;fqg zOx)^%DO?ZcN@VrJ_2%4o_vM6DNOp!Jg`lf3^v;|tek9$rk^#zeNhqw)P$jOZhF-uy<%FicD&}1!4|wBqR5C^uLED%YNE`x@r8S? 
zk!Q}$wDqGl)A+3jcE|ZbSn(N*yp)dFlZ`Rs#BLbZbb=)vYv5e`JMwh#XZWO2OI}=$ zfi0gx_9$Dx2X!jC4O;q24}{K;I(}iZ5~_P! z;RfvnUeRK8dLZNnoU&g^oe~XTXNMrj{i*_UWe@BXn-4La92;SUE7~~&EP5PB8=M_T zNUjKb%zHXMzqFYQ45%}mm-oX?=ZR=z^#wi)j>14+C5gzMjmwfPP{DK+s0DwAyek11 z6zYfz1#T0(^@1N9ECYT4M&O;EM!d@>u*(-M1h1k3(tj_3jz`7ds{BsK$$LU?jg(?I z$CEF%i^N0wZi80Y6!z0BXP{nX{FQ+PytmOhIPoy4y{IqAHsGB|mwmbjg{m!U|{-w9^)BAP&oo624mp`L8xM?no zUA&FWbP}sxKAX}NH5>;$l_u7-<1?!;8uB9xYKDH22*EEj=FSEjoTS2*_WUH_Ws0y$ zHPhly-&XKUdO=~~bx@9L#B#1H_9oynRq0g6%|Vy(QTua@sxKsdj%k=zYJ~cJ`{~Ke zqA+}HCH(xBj>k2ZK-Sh%m|EfvIqm9fk;qDVe(pF8e&z}<$EvVrM=kLb$VIGND<5(GncVr<{auco#=ZL%Khj{Uo;Snj<4I({i2(PV=74_NU*pWBwU8e(gl{=E zXnR;A*O~32=X4f<+-X57ST{mDxg4SSb0LW1y5(oh^6BdRNfxIJzERDkQs|_(k{mrE z#dbAK!FQ>>!1vgQYvyhvmlnRKlh5bk$f~Jy^^pXc;!_EZE7S1c6pF-21ZTgIV7GpI zgAOnKpt9ANoo6q@?tZZtjFvahMENZIpfCpTD*~r~@Pu`fTTp4V4}K_#;pmJ?i>C+w z(CE4b%nl93d#ATTW9de4z5fNHBc@`+p=Ol1Gl5m+aw9iBde^LxH-Zd(8^}Fgg6a-B zaDrV0pO?I}aP;{{wsNe~&TFShl>7>uWX8E*rpjT%zDgK1)MRh%P=^Bb2Q_VP=W#t? zIT+(QbN`;bLW_HGgm=UYgsvhU>34=tQ~cmgfFQ>>$$@YsB{1_=ffd=3kZGGq3Q}z$ zInRf0bUh99H1?o5=YK2wVME7qHo_~T(>(DlT`~kihA~AUUfP z`^5EN+SGBnXXYnhPdp;e!;R@Y*NJSwmVQ{ZdNWKvb^?B}L8MH26F$6Eh~nO*pn6b_ z)slZq?M}z@6}x;P>#G8@#yJJx@pPWifgq5#`;GCVPf>EiTY9T(7ct3n$MZo~h+5Vm znAWhKpZuRAF6EWtso5Iz){aG;;O94*vs z@qyPfP_>T&VT=XwgMl0mFqAI2sYHKWDx-q9hM(Ja735_P!HnCN@Y%L3)Vm!7!xQ%5 zm#%AkG9??SojQd0$}(jkg0R#{0*s>U@X5cs$l3`oW7U?R!1WC!)~DhZ&hK?)|6UMY z7D_aaxRamB%>Z1sbe~^1Nj`NBgv6`K?S<>$2%wb5?=hq1?y!+Dd7H*COwCRt`# z?sU#qEx{%v1R%dd8Qc6;;(Ot?nrBU|IOE&|>Tx^_6Lchr$b?@YtTUaru`L_tpOfX! zF|C26^fa!2IL?z7(ZhrtobNBJkm^5cq`PK6q=%jQ`O9L`>7kxCbXA`)4Rbkw8iqBP zva*$A2VaB0T~5%%xv{pWt^wYoCjNcn>%7?E5#AEn2I|4wqSh%|*tn2Eqjl%8MfWE6 ze!7eruPi}G&xBdj6bCM#$w+v8r}Ab?nEccd3d`!rtBKn=KD#+=|5<{vGrcUXdhxKj z$sc>2s?e%)CTO&c)BKusys;T`QAn>9G}`0As{IBYcT}bUv28UsgsJ)CHuETNF_zqc-X(o-*troKE(kCa54FM?#VouwQvsKW?WcX=5-{VQ4XNHe2M2;! zyfLAdrhyHHPrZ%fcVF^_{RAO-#X8s!#e?Qr25xae6s{8a(eI^FNPc z=IB&7QS+KC_myN<@5$nQa!Vrwqt?*X$7F=1T#9AlTV$+7pQ0(~$HlG!Q8S30l%9_%))ZaumQUNZE zJO+VDis-dH5oZ6^#@?@!Au8ZJT8a%|l~E-KZj7&fKi&bAr7JD=OhasTybnjdUcw}u zU~)S~mCYBPhHg4(@bUK;YSuoXt5lR2M=1-2T3W%b3~o1gVhl>9g_s#5|DoIQ3hG^T z2kXku;YzKOIPd6Q%)H(RMZG3$#$E2MU>y(kK3dGj$jfkf|6MrmFau}vN9pDb#Xxr4 zB)@&%)EFkj;f4#psm|UQuvJWfO&fb*SVkVE*oV=vx4p!=y9ULRIB(AOLQFKi02}KR znU$?SAo8JX%>#}rYHK|kv%df4d$E2HX-y^lY{iJQ&h&`aD%idu-@tJ=e4my0G z7tNMnNXR+ht8q?N$tJR8`5NZ)CV%GevQ6NoGmT_EiiK!a7o4qgI46yG-pV$ z<@5@cgqfm$AGd>5j>YADUqPVz6c#mBLA9L%4Az&CyY+l9`jkN~WQ9@w>>aML}4Ql?+mC5!j=xV6owj z5&f%jmscBX1HM0HSpQvNXxP|I?uQO)!MycP(&NCj(2uC79{k zA{oB@Pi&qPjIVEK;z`eXq9rqtdBGQA?PAxlQd8g3D}tNw%`184yr(b|=X@BZkFP^v ziS@V|caUz$L=YBiAqM9=GI&e{0xpCzk&Cm-~sHbp2U_{ zwZWDLTkwu2J-O_$4I}S0^3(=H_>scb_>x(&%s%@#BDY5w{jc7mqB-GgZQCz2 zJjcg$m%DJlB^QOXeq*`bck`)76_|Y+S2C7H%cyw66*v%l4&0j8;pluNCedsoEPQK? zw`P@q@O()Of5&~!cYQ^*m#L`W-od>a z;{_Tgcb`}`s4=5U^WeaJdz4zTn!Oo!1{@9MFuI$aVVYwil(jV>IW2-&+%3`3U^{e_ zts}Fn?_->o1Qd&<5!T=d3EZMZ4_ICS*^6Pg+kO>jaGcR+UnVmAuai(Ki1RP}K#+MO z#Xz_gj4u0$4SpL~?cWq{uaRJ9Z;n7qS6>Us>-{LleIC5_(Pa1S&mbEX1=7B>$;_70 zB(&LCi;{!0aa)c8&8swlX{rk_tWS|GSRnvYR;Qp@>J+dw=elc0IsZg^1IXTfLN&rN zu;FPWJ#p$a_{ro$%aM(^^m{z;XEKa#;A0qFSVS(Kx(1AgK9e1n!MmG-O%~>=wTQ`#Xh! zt2~2^XAMqY&H1&wS!gc!2n|(papl8CvbbpqWN6ysMv?(Xln(GBFPpOhM?=}kI|P^m zol7CC_X%DYv|>f9_uvtQRg7`n6=M52jb9YAhZ*2zjIMeFb8#Q|D`5-Te{0}y@+sg6oK9I_F-iG(ze;b zu*!cXKXR7?yp&PG#Qaw{aqJ`7-Ce+@?~KQVu5B2{<;;DwPr}l&K+vyirgYyrsQ1x? 
[GIT binary patch payload omitted: base85-encoded binary data (not human-readable) for the single binary file added by this patch.]
z0hAQh!?y7wxeY(_tNc>7ft|A$T`rdZ&0CJ)E>Ty|uJgsIMP~$A>$T9Jl)?Tc2KsNV z2x8hi==b;u}h50HuYuWikC2_rz3#0i2_%4 z8&?FS0WlLm&pB1p4NYT3*M>>y|zvGTBpD*jgBS3vb(58YuB^W&D2QUX zKQ2MwSjbSmGbn09O=eK|52)}EV6-+3QrA0l>OYq=`#ZucUF z_rE}5l_eKBQi2ynb$waLSH_tRvEpdYSD;ukbP$r6U9P&WJNF zuiYTkA4huL-^ObnGpk;B^L=oWS8%jy0}f2_;(V-)xhLIv=sHc2`y{bj;IfeSFxzwh zZaj;zH}>G*pfjhoCLdl5N`uzOkzD$jJ!m<}m?^tg!I9M*ifUD0o`F3d4d=i0s5lt> z*a(kv8XzU#0g`x6-MzrqWXYdaa69hBoSrDqche@)23Esnm#u}+smnpw^o9r$5@EnR z8Ure-;CaRc%rA4sU3WcjZleMm>NkXlr6nvnc7QE+aK-H7=}@yN4FfmcVGGn|U|P&A zR8iWBPo5aU`ce60Hh=bCZ{T3WzL7K}qJxbuC7i-M0r!JveE*v87nhr_Vj6+-K$ts7 zWbHOH<;R5(&}KnviX}M=o`W;q#Q^*H7`{nVuw7==WICU_d{mbNPqgl^D4nH5;jsa^ zKXV_BkF3VHcddB!lQQ-3_{*;DRl*uGNm{4W4M#{G&ueys?W22eTJbE{@T?x>P94SN zPwlApIKoXkVnOZQL!kR+0iFuGFKCQcrLxNksm>OjKjx>$4TUA3!Gg#5GBFAJvc>Q~ zZ6Nv&4BBiv8;dfzid>UwAWG{Ka&)%_PB@ncgIqUAT&aZ@=Da&zk?% zWjuU0f=$ZK0g*}f$yCL&WPM@+I9v{gv6DL3b(vdm(b9yTAL2Qp)915Al{-jfy$7^R zz6kagri07#?=U*L4qD}NVB(^9cNQv!l!z*) zl{f>VXm0v7akOo}gMvfS^xb|98n{IQthcO$Wm(mb8*WHoof5?zmtdOq1@a-S5gZb| z(B%7P3~+aWYqFX!X>JpA|CXn>L)U=fPkl({dG~tTH97NnBiKU0KGu1!6#r$s!bZEU ztL6XnVE5`b|KMoDMKf)rF zbHcse9|-%snDjlnf`27PqOFw=ij0<_Eq|-=hny7mwfmniT0e>$ty6$`lN;Hsy^dg? z8pg=nS?p=~8}f9aELXnN6&V9`D!oEbL@0!0q-wa-~n%#>h@=5JxhA5;Q++353a4FtqVA>%68MylIMF3bz27!qWOWkLR8fT+X_IhckQyq;ntG}ltvU>` z`1s*>Ed6Ck3pVYh+b>*2k4xqFTg{4$^|pg)zs)Fy&*J=3Cvs|EJ5W(co(>&3iyMV< z&a=wUB7;JcKi?Pnk{>N2#dYSZAt&FH;g2%IZR;8lS% z&Dm^8foBn(+%SSw>|YHl`3z>+!U{|ulg`BrOLI#`We68m{$)3oDA3l1Dw?-MlPV}A zky^J%oO-j2K3?Wc$2uufBfhqH^3n$#UtEDw<51WTScc>vf>P*Yit-!q`U0LczquRN zoSKUbGi|~9RyMyMuEwq2brWZ8j^=Ls9mOerNQcv7meM-|4m9>_6u#p9J-fP_i0@?y zs+W+2T7lAB%${&Ac>Y_M8PtyIDAEp0Z)(NlmUId4Rl{kTShmS3OD~wy@ zjM1h~nc<`}+-vCq+*+`PGk+#Z?ayyh8X4GFCn{!W=quvLfr1Ph^`t_oFtGl=)K#&L6w<-k0XV&VC=i#T^!A4ZLk z;IaKt@=t*q0dAZo*!EnV@^GI7Q;u1Vo!JAwN}Eo!*pIeQ~{3HEN+MT<&# zPF+!wTJPdr2JzJ(lbiu(w`?ZS7Jb;CorT?bh3F+V0~TzJ!^4^3_~Yjx!Oj^0=$;YH zen^MFd}UXBZa1@TgZI_r- z+DN9fg=gKVr-S@?{_M=&Cd@7uhX=;acwyFNZm?Agni6G3nC=kSI1H~9`p z(RPQ6~?IAkBr(pP*7VaL^j-Pz_UP#+UI;bK-XZ+RS z_S}--BxgIJrSf?0@lRjw#f;B5cfv7l(?}K0dPXpyfeYtme*kS>h|@#wWnj;(xtz?{ zEJ4gEBNXkBrhh#4b%3oEB1g;2}Q~mx4pODeOC+ zt&x+6LE}zoJm0?9ro?U*vcEp$huk>osG~uLLY4>yM?A#(&u z39ilTf;);MVZ`Dcq>dWFwT4CTW%z~gu*L~g8s&fsFFz$Qt}DQ^>pj}e{w}CgnF#}t z(dc1+jJ5r%M29&~;k8$Y;Mx32_%ba5Z@c)w;i89vk{y?XQxge%*BHfVUcAXn|9cKk zW4w`jAC7KX0&eNwKG-T1L}w&_!Li!LT*Nm%13X)uKDM!e{R20!r6C-3_*yASun9LV z5TRFn$}s-=H-XO@QL;ii36#eaK$Vu#izFzpY-MZKanLY31X z+B*W;GXqFK_!^v?thZ$un+t;yN>Nda`XkU21hk*nc}uctJe{DxWa$dEJY9 zCprrx)*Kc_6=jn5MdyjF?IKR%YYqN8KLxeiQiz{HBR<}1B3ypc344#v;B-X{xnoX= zI6w6f__TF`S&cGi##|&`+v{+<-+=YyYpO7AAOTDb$}xO@pg?N*6WDcnFPZwn6)XHU zV5FK1*|5#=Oj`}g3k|ViXa_onJt143jpFjP-m`ssQ*g?jXp}ETqUYu%m>@cu6O5RM z?}d%Tir*iyvpU9{z8uBZMSq1)zQwVMzs*c;d67V7({W+_*$kYpaVP6oI2*byCo&0N zo=rF>1e?z1WBr0C(Dm*kgFF|Cj^B;bub0BvxakDM!$33eA3k&>@Gt`4_oLVNV{{u4 zspq}m8_zO_-I8eYaSWF~Scw13AKRqpkEd%D#5q@}i0m?EM@ zyndWTzeZVphs>2NT2)4_Y$+fu*|mb(*7rf}dNewY7KIBdKEqJB9Ew|62#579VA4aL z#r5(jy1VYciK_GAP@Fc#cPzxILB4Qxl^*pk`A!s#8kv~79Lax?LF{<<`XzQpeD@t8Ot{KkD~3ys+`BOTo!u#q(Jj&8Z>(}V8H}2 z&SPr|?AB7H3AH`gr)qIb)dU$Zf9Hq=& z!EJsQU{~sU=+>@ab#n8;sVxmM7Mz3mC5O?4uhS#1<>625IM}4t2yyfI4DO%-F@Gh> z?SA$LJuWJ8-{KRnL+vYSoe4+H@H~Or1AC~Pav7?RJY^O`JHRAsH6{#Bq0(ORxGC@l zG2gQm%I;P}xlNkz=j}AXH={N9{BW|@41|9f~T}iw|>*P~b%8*ulx$3iNKg5jd+^lPzotXs8#Ewl$uxksgDrYpv*$ zSOw)7h45|vNAjfh1W5F(#)tgfYjd6kkq@KfO7sK#@m!f3+|rARH_Yg^Z{ ze_EN$^Xp-uXZJ&#TN)}KO2jOp?PP9WX89Zc)~k^DS=xLq7YZeFMm z1RGsMWm|L1$m{@x>;b?j4lsfDxd`mXP(fNIt1g;Gm**PL)LC^f!?+#39!>@E_OGD3 zriUct4v^2GtGKhbq%i-J9z9p^5Ok)Fr!862vHCRc;+-``Xf4tVd;8AYH2uAS?oD2h 
zVW~#tc@INlycmie62Y+XUEnjA1ga0m(WLh##MAE{JN8pZ?5oDm0;Sau@9+f#Q&icz zzjL_IJrdmSTn;}EyYq9myTqXC9lXuAfN65dkX>7Xudl1n&o}>|j-5B_-=K#czE63t z0nc>_T}iHPtH+3y*HD4y`GxHECi`qc@sM>2xim$Ht7-^R7!}xQ_SlE6YA1{kRtR2J8u!Dq=b%kJiRstoDXfo|$V=y0;0So2M zu|pn7xOylMe|_2x!FP|~_i55}$IEs?ql~!joxQlLIa@GTU&hY8{{|Azqgd1LEEZ{X z5$~^h!DRQ$VigA?c%Q90XYwgnXqw)GYjc*8)s9YV*?vb-5IP%YCBMco!+Tl9b~$?G zT^iczgpkQ=F2c2Eg_x=tLM}~qs(Mi%!ulp!^YdXJ@V)byEczM8+V$T+!3qum{*MVaN^9iHX3vs48Uo-7M2Ctpqc_g!im*aM>po7_TWMEEVqZu(^B*w zG2qJYjin+r)n7;#3QPPkeC)(2lR`F>Rv>t2PES|&r-O$TT;8xQAP zm!fjwBi6z#fX#Dw{{E?dWSq+wDjTlFwIr1=3Cjuaf2R9%0|ONP+X>gdNeP?%gTeRW z6x?vyob_~uLrIGyK$j@BEd2*nYb#O0zyKa*j^QlZMd6RpZ7?{|Etv1?0OGG0V#aa= zxfEt*tcvODrE%fsMX*cd2DBZ04r?ttgh}reaO}BT=(M~Z&FuJ0X#EFvdDRPHrdvH8 z{MX97mt>;D&g<5PoM3k4Z$GBrku)HBixT0$* ztT6aS1UIvUeV=9Ure7Eo9eo84yaSk><{6al9R<28nwhS|bh5;KHKy(S$iANN#A91^ zalU6aTbdxihs9|al-(n68j8m~>XbP|R)V?IT+Hw9WcPbd1MW(KRcq{VTU8Kp(%x`l z;a1#b)+5{$;0ZLg16t<-ru|eW*@m-WJD(RBirmV^U$NsZ7~Vniq+D3*FcmBZ60pxR z66gEbV%IS}?u6fCT$&)pnVvM_N=KNIiI&&ch85|8{^oj+wRniDwx@#bj$zV#$Oz6~ z^@5K(?!cwHV$?b46^!*x6Yl(Q9!io21w@U(f=4#!mKr16yfl{F`utYtn5u<09vQRT z(s>wsTpofZYS0;yHr9!5%9v_cj`?!u$@8=Yu*{?fO+|FkSWT85-Xeu2eeMv`6b~us zzIf|z8a6$-iiJTLIB}gRtDYUq?nSrYjQiF2ex@b#{Np(eqf^jD_?h?7XQBk|L;J2v zc<0encGs*LmTmWDV@6E_-CyhJL9>x>2188!}`+*GOBYiCq(iAERgX3X~qVcn}6F>~ZxaIjg6 z&Xzeix!DEIMUKV`69chk%rhlj-8omu)bzU>3%l4iG#Ebb!x4MiB}96ntpV2!}$&xb45PAtqC7cN(iCZJVj(-YL6SS)zX<&Eg(N?Q@tN3LaC6Et2st{5bMvzz zEBPGAz=22Luqy?n-OND!X*}-UeV3eE-v)Iqp9FFizo2>NYc>$mj$R+0!3(haqv4%pLv0m2(!T1><-C`=l2vXpWr?zbzybBIr!}4 zJ;X|JAo5`XWS{5<(|I3ZtIZ_H=$?pPlTEqBY1*8}R8P?M@q(}J1s{IsLic6~xKuI;>Msni z`**d`yg36VyI4TS%0e7UJIrD#wqjN3L(&u*ii=9e!tE+i?(XJk;B~1K-fDZZdD(Bk z|J4#q2~)&p_HK~5?E=2dn*M95cFY3{j_2NW$m1)pPP(}RsFWXCN8DY+MHS3oD|`bp556aPq0ZVUQ9 z*)9wzcvH1%a}xOO2!|6g;Vf6Vm}Fm8qBePwaQW&kKBxJcReNf3zLCS=;64b)0yJ>v zy*&6`a~teGh|$i4rkJ#_5EscL;1cm?7UcCf|gMN;eRTtq)+GYZ{)|DoZ6#Pk@!%DxlX}yy}Dg1&l4Z1Y-Q+tdM|IOy7d2-(a47n1~7IH{B z8=wCBBwV{`3NF{p0MpX7@bJS0INkl8L|5@ljGSoL_(T;dhQEW&yo0#G$rz`Ot;4u( zSN6@O4^Fj*@{EV znBTjS(T?W*RhuEwsaH@q{j5M*?>$QJ^V3~>VligBHvMYMp|E8>xxK{-?x|H_Ph}~L zo1brOD)|qkI1Z<*E5~!9VR!;ZlI~1<99rXoxBX8*4!_e|bX0`Xm^g!t8dC`U1;8}Y z-onO;et|NEfM{PDTrF6H&D#0!+2axDWC*C^haejN+ZYZeD^QV@9n81+DeD`b4C%5> zI6bQpzxbVE;_cJ9k!??K>Es&hyp)Q8WdY#w;uIEHy~gB_W2ieX9pgGwN$BJ2tZub3 zY3x)a?M*FgyqhNPWWUGeOU%V_PM>TFd$*z8oku7a7Dc{piigGPq|x9@4_a({MO^)- zu)C>k*tbkEPA2nUetKT(^OXGxH!jdlfBv)WdJIKP6vy=bUkp4!0^%i3@pa z#vC5`L*$b6+_i0%?Ed-tcx=ii)}87GK1p-Y^u=5V+awKlPj29Injusxhl5J<1=KA= zg1Y$Kfvb(4)H44$Xhuw+5wGm2@2iD);ZGM-WvXCrLMZ-|bcLF{^>FgTOj!5^=&On2 zL0|bPoBm@1T#_@RMpK34zp-f;*T{3(WKLlGy>z0nb-l3aryOok=p$nb)}Wn42Y&Ky zWd)+qV026ayBqlXdTk5YwEZPKwktv|`YrrA>WF<0uE1!DXuqrmTUzebhj3S{Z6APH#D2}RNEfu!WqA?!cp zibk@bc)|V=mf3~#KFI)I{aJ^O!AFe;WK}K+O&EE zWjsT#P1^!={>nk<`V2PTPnFt#OJHMWO@YCz3r+pCY3RObaAnha zyfS$mlyC3Y zs!eb3>VY$`)ID0bpm;o&v8WAdMNbo_9i41^+5(rmFWm>&GaM@Z1F8BV&mOX7IdYkv-h8y|d z8_$3Na0uUP>ON$EUV$zB%6w~;M%g>tQ&XAuB7Im_d38QFD zqajJ}ISzVu~*X2HScimVAEKzz(IT;4a;6eBzaY#kU*5da@ZbW!hoX zm{_>;tqama)uAnT1S`#*3!T-XFj6xRZ7L8)^7~h-USxphP8&2)V0hMYDo(S{C#lE% zL5}Ch9k3Zo-@dIUB1u~bH@^;+>efOBzjr2<`;wLVUV`I03xugt-w0Q%8%d`x+9aHn zat4O3|7H50w&40-`(ajUI6U+|0B#M7@G!H$%iXQ4@T({czpg{~+#+1RJ{}7u<_CWdEml9&~f-F^ZtKU#-FicWaj{5kVJcN|`I z{lXm!rO2zL|AF3t5U8&X6KpamL)A-JJSX5Cc{1uTvyiodiDy=UZJ#l8jh2G;&Nn2= zJPHH87%-pw8>DspcqDx8+Go8B-c3D&F2{9*o_CI*jA$S(yBG|aE9!CTR&hZEK7<{L zEilOMT{RnRuyLMs77S)Z;tEOKs_({%oDc7U@)v%CBOm_YU7~H%>5xo310Ox*=W(m7`AT&oDA0Io3$JL%;{FIk0D%%$GH*7<# zyAu3-z!b+@{$QQaM^L$K2y?tA;^Oay7~fhd*cLn*tQ2;U5%$-GA!9V@vBq0aTzs9m 
ze{Et8=P&aaj0TvYv)IO8T${uWsl)uEQrPPhiH3W|z}Wo{t4@BeB-$$1s#ZM?geb98 zRM_$pE=)6K^7b>qV5N^u#)0t|^0AOCl!_#C{uRT}dIhdsHW@=C^T|L~9nQ^m!I_t| zcn0n+Y)cx;E#>P%hc9mgs$4RQIW2;w`~30Lfi-xsh}#2&zgASPYrdD=?~ z1TU=0SQ+1ESG+1oPt1|W;Le4P`vlBh?$AZL5<{_AZgo$ zwsT`)tgauVEx(UbEAI#jO^ZR;!F!|c^Z7RGOn6kh0-FrA=+_JNu<>7xV76^2IaI5I zVQej)f8~hz4(iyo=&wNR_(zo5Qx1NYr-IqNNRUvH1gW>hF!Sa0D!IgiIM7l;`a|Pz zK%|=;*t;EVhp!Q5(_nnI!4o5PO=LGm*9pHhi;$L$MgL>!y#KlW-al?;W@i^EA|sl- z&UH#s6iwQBYY!=ikPmg@Wb{Pz6=+_-r?U(a)$ z>+yKp9~gEwMzq)e6jan1fmv)c#Jym6^^!4H|5JnkANtr{`DZwAp&CzotBG6YUS}kv zQ#}7+APXw$7xio(1ZzHYz<=#>xO(Xf$Z#lyNf{Gx`jP+e$FGy1CfpnPE!=U_;4f^% zcn#9EPm&M2{SG{ywV=(|2C`}3QMh9z{G2zzV;b|(`GgNB`X%CY`55SHW&oR??Ns1r0fjYPs%sWej0q)5Fj+>pRHQ4RWYY`2d>97jUlJWb)<4T4FaSlgxPj zmE74-jG6D`psI7i@`K21{9(WD^>r8=& zIcv$ub&`DMv!$X+=jkknSh0pvC9r?Kkn>*}g`Ept60^<8urPH7R;||J9kwH(<@-N8 zdFc&{8hL;u?V8DMY%PZNVTbWx$!p}+3$bi@ia2V*0hYhH90t3(GUtzTaG`?=SKOI^ z3(~zsPQQI2zsUg9%VI%F`!ZhHrX&gp5c;+!AHuM?XUP_`fhZxY56@>=v%R1R2iFBU zB<@>cy$x<`@6Sm^Ut>;(?AyAGO1=3uzMeGvDzqm_gso3vpT+=-XNdGBuF zxs}GS^2cPT9IQ-P<2-yAuL)Vpa@m##9?;R@OvY{LUE@XUu2jG#(k?2#4R)5ThwaaNj{U z7`C?nw&V@q{sEG_;*va{5%t6VMgDz!WRrx8yGqc`HeM_mlFY(am|^DnVmNAFgTu9Q ziO^a^l4|wP;+Q>rxCIzu(ad^w^fQgc5@LtvuOa1(3|&@o3a8wD55;l)I6?h5KJHe) z_eNf*82gkgEDsg3Y0V&)YKez0s5#6nae{2;(>VL$Jlv%*R;<)GfL<+5fV@L47-HPZ z)>NlKP2>WBQJp~&`47CRd4lPuYq1*(?h}9GG8kiRinDGNqUz{+cJAvI;&Ua3+3FbZ zX@A#15uS(2_?M97q|MGl3iSKi;NUmycz2LKlr_x7pS+3Ggk{;k`pJlVWefh-s6{37 z47l&FTD*zZ$bgw!n5Q@tn>4Fn%$Q@KR@}jSmlAPI%Q2MyegZ?l2V`9z!qwF?;rFL} z7Swqku4znXso}}2Wq20uy(URzn!8c6^g7N+=lFS&6WQ_4h!@q}hu@Nih}868@!kzX zKt5qC(^zA|rbvHdIp1Wtg6#^ZxTAy{P335C-xT3_n@-A(9RWSRW=K3B%^Nbb!P}WY zfN~XjX&!>uv(D%__bYS0xE1v)K9dP8#jrI)g*S#siwAeL;|Ai*7CsQ>o;&xjcuA^g zSa2SERt|;1H#P9~;e8NWevc$=jl#cM-V%BFCUi1%COP|8RV3sz!sX*zv3*IRID79W z@n3cW2i~qC;TwuDa*jNemy{IP_CG~~T}QL?F1NAXCmIVjwt-x&CYQX`!$gVBa8<|! 
zn|7y&9Mg5lhY!LW7&u?pDQUs@OV5QaKU?_k+izltZDeh320I%UhbxN0P&N7{8-3b< zD^Dqf5H=Um;t4$5a~?(=+DQ@?RDyfB4h@e!AzJg&M3}`?=m)J5tUn^`Cmss=dZ@ztddu*u;Bs;lIn zrtZ#?{X)Z~G zgEzRhu#&Hn7Z3%UCm;#ZXHQ=w8 z;6Plj#h*8nfXo45=OeuZy3Pr@nMWAWE(ym$(LW$9s~;1Otb*yU7sIX2N<29wj~SP} zV7YCh(8hTx*G@liSsFJfWUCJnj?uT(VVhY;;s+o;VHEt?w?`NPROjhgQP{~9Tr0u?PmQ*ty?Zs_UvC^wu-bv90yD&9^BJ~mVvr9{H7BTCWR7i;uH4awRC;=m8D$ zyx`o#)xcGgiR18R;&7>z!9aeC*p+N#?^iIUP#{VF8tAZ+2ST3f$S!=~sSjBNk>t}|3B1tOjZ2CrK>FDA z4qCN`F{D=xde=3u14n=3^gopMWTc|+M_r8%kBfeQA7AFznhaMO}*S3w;#2 zbo_oJZZ#kg>Mn0)P1D|!WvyngZ0AU@KAJ&>RCSTu;ttqYRV-etFppQtok1O+5BO)9 zH@_G17H#is=Z{`Y42?vwDPGUK039y0KDz688O6 z1G~>=ARTW>WIZ0E!L)D8X+a>Z`JfEj7EAil~M*RKPyW)#G0tN26 zB0G1Z4dpi}pvI6u7+*SqhRhwnWvA-$#LT(uQPK%g+hj-&=zhVQ-brl4R|{HQJYKAq zvYH>sR^XNYj^JYHP@#wVmw3v;#r)mYsXV4J1fKnVjQe%W_KC ztUhg6cXl5*cTM5nPoCf--`1dmLJwXZyB_wJ=iu{iTD)kQ4lfn`br5G8^XVb+4zHSS zz_bI8VPfJ8$nBH{cj0b4ut0)NNIwVXHp=tOP9l6K^mqFx?xYuLZ@`aPv25<$W_F=H zhFrAMg^}yMX<3~rAG|t~t^AXL-+oP`3;n}j?xL+YBT1H~J}yUH4{Mg8Hx(1tPh#E^ zt?9QIhKet{v9{lipUI?L*2aWxKB>k1U)yt!ZTqm&a2k&@vY@}*RdDTr8uo4fBdmUO znyF`vrrNSm=qBWU{Eln#+vC9S)JoFGyTMq!PmPzY zv*(B6S8>fz1|CpKU3saNa1W#0tu&6Ge*9NqKZp}qS2c=jN;l==ih z<0jz@BU}2+eJK4PAB%BQ_4vDtEMl=mm!@pgqPtt9=(9&Y_;*Yz?AxsaZ-WOxWXyS- zz3MmWrk)o1TLR#4Z4xnD84I1lcV};wh}SrdhE=v&(0JF;L3!;N%($w=s)Ezum*kI% z@k65F{rXZ?Y;B3Rk`|!H;?uY&UmrhTm4>8_YP_Ch4oqOget4S+=jvV(lTt0Qu05^D zes&1Xed1WNB*TJIPiWhCNgNq7li!^fBsTp;sQS1K zIP3IURCiXP@8#wQd;iO9&66mIv-kvmqe@|HnHsj7Ud21RMEv*gNj!VaWn8jRm+orQ z;J-y#_+tJk7#Q~sT1U3S$O$P%<<6GKcJAq6@sFT;qFUV;z)2Rn8&4dvzQ zKtG!~c=>9=%j6`yqIwy>9Zew@-u%KjjxDS+wqNYr(Et;Dm0|nE7|45^!M5$a&$??9 z@JOsByl{BME=5XXN$Usp_W2JOr7#qKQ{nD%=QK#Il!3(30bED*DZUOif-lWB=yXFB zk2M5Ud=n*siI)esa2-rL6AeqoHnSHr0tx*q%_Qk^Y(M1xVxUP+0-64Pci4I z%o^K&m9u>vRj_fWF{mYcNAnI+!EW4Rp2H*PT9?hR=#UjXmU9D(&hLe>SGC~Z2s=7{ zqYL#t>xi~@t$E`TA3i|k0663z8JVO=ZHMPO-1?!%PaHXmW`XK#T`hytrn*?8rz6^W z#T>MTucG2Lk>u#Ps{)7XiRg3N6Nqe8qqnz7aPtR8P-$j9-?8u(f2?zk4}Y|r))$NM zN|r1?cq$ZEeAL5(XH&sx_z#lOMCg!TE!e!TPv}c6fXtQ>eDgv9EA{nZ(RE|0Iz$fI zFI18L5^ABq{|H^^ro|n-K467+3D38`!y~@C@GYAAMV}|DgXJGtx_!SU{dIK{o%g+z zzUdfCYp0LIo}BSi?Y|K4OOuB5_4UMQ`XI=7EkU(oU$OTGW$65sr^Q9G^Z2fJKQW*m zc!~D_$o=?{ZK_OVSIidh(*aM}m~kQC7~>{ZHkgkYeuH_R^;;p-(({U4>K{~kEl zoA0D4WmT|Y+-~~cCMoJR^D;gto=+DJ4yS8k{Ak2bfeR;P3Kc{DkrlSKd@Ah&%Sr{X zx{(T+t@a@Q?I`YUA4r2eFAH6S$w(}hva1$^I`u@b;;>LCOc(OD*74#5s}Fd{WgI{7 zXf(Hb(gt3hI<&m02+VADkyC18c&JE@b|;U+?2n9f#aOU-zZ1CRhX^dQv)TQ^ExFFb_bg+5KQu}ev1?Ah za96|^rnTFUcGqvm5l1!Q$KBcV*Gf~&USdhF)n0~#LwCr&O|b%_PnoQmKAfxDUV^;t z*Jz(J5ISe4iI-Xk-hhDfIIH^vK(ZREmj(Lb^%b=FGZzYN+gXI$PV_qT2Gk60h@Y_S zBK3#$xY9o#npKi%nOO+kQSlXb^sIzI|Mc;kN*-z*x()T}%Jl1AbAC}rj;}9cQ25%L z%w6;r6!kxn`t(8Y7zFR2nJe+g+Kh+C9wQH&ofvl`#KcYwvfe&mJA2HCe{rkG=h7W` zVz7=F*31D|qK&8T?nB2A4{%Gq?-1GVitDU`V2S-jJjx{D)2c8muW5rr)!QJysG9{e zcc8ZDCVpI7ho7cqkZau$tY@nNe>_o|>j<5IOO9^B-okm%bhL+zz1B@8>75ZB&(&l3 zrFY=$ssQq;=na^x90Ox+OM{wb7^F4}dEDN|EGV&BWP8rCVsf=24Lx-k?hCAfrI|m) zEe|W<+L|ts&Pr+$b2*)X9|dUbD#NBG7v&Xef#axb>AC_X=oWJ36o7*xiKh zp7~4syY3;AiZ}=3oKw+x@dUbN<_}T9J`;G-pil3$$f0dg1M1}}!WqrWK%0M~XmJavTn{g57{i;PCVmTDe)l zB|{If((^4id~7LR?0qBhluiMSo}uEghHsdOfgCQrAw!i`d=V{ZD-%Bd=j@Mq43w35 zK!{ur`mP!XeS_q1*`o+iwd-*V(VPcwYNRL)uEQy1#{5U#1(^9d9L`^9ViTi*_YF4U z(Zwm)FI5E1${H~GM-s?gu;&Yh1n}GXW4LejRUozc)bYF+A9);w`I-^9vdR?{!-ml~ zH=%ducp+vH4R|Qm5AJ8$$ZE?>Sf!OMvZxDU-@S+OV$n#VFl7eJaT0cA$F5_NUkmR1 zoCddLO=$8zb;yspC3vE1h3*73x@eyQ9(d)(i%YZ7JiHvA4;@SG{u@g-N#;Pv4kwtU zVFw)>PO+VT-yl>!B&OSB=q;D~Xd&!TihfD+8P$u>|L*{DX`U?R8{NdOFX~Wwc`kOj)T`A;7>grASdvrf^REOQs2U&6Z^>7k1Uo zA+XZg3x8L*<3vLVzJFkwXxYT$WX^-l@OrfdeJNSb8q06uzpDdKJzN%A<~$=!g3i6u 
zUY4_8n%rhZ8teHNhV=~;TJ}amm1+eW*WMy}_q`fF%58(ppWmUnbG^{nlK}d8L+R8O z%G(>#*_q*cVAv>gx+pygohOgSH8)3#Zy))D`leYd^=S^S8u$VG^Y-)DAS?bw^aKqi zPvTiSMicj0d*P6O1*B|~BFF#KlbB{%kd=K&2JUEqr+I>w(f9?rypNLIQLBmW-#&C0 zUxv1CvcX2J51cYP#GgVH@o`!+o;??XwmRn2+IKvC@!Sog7L34s4xzX+pHdxA2 z{VQ3JyZW+tMz*H?)dhVGH7-Ehf9xY(#HXPur5BSyiDOa zgmz2elg;<^>%*1H9|9Q?14@hc!;Vky9rj+o!m7RIL&w@4W|(nC z0Lw_?=gVpMZEhMAtzQHOEyG#=q(|(xkhz$6s{xjOkf-ZrZ@{5Va*#DX)R;))C7+5@BwY z5+r(-!NpDOLbsj|->dfko6Dbrg4P7wIer@`%ykhfE>0wUcecaz2nSmF|?hf&^R2{k#^m7KvCxgAC67 zT1Qr@?Ss+Ue^}1F7VM1w?GU4H0-x-^3VjO#D}IO>YKpArMCCjj){%naXO%HaAE8U{ zpdydIJ|2ez?*_X~5;SP26b;QZr+zL^&`U&$`s=P6@>21d8a} zM_}Obigama!PbaqZ1`#d{o^v=r_pJ+y{8Ta3p-0|^=>3Dm*4?`qxSvDNak~X1$~h;vjr}V z6qtWMH?iaAve10RBW${9%-{5=Gqul?=uoS5^w`0>@P5=`GHp}|8XWz~Jk_2u%M}K& z+K#ef)knDO`A2r{+d{rt;uvK4DB=h=AxHl65hyWJp0LP(4~?3F`?pl$jb0ZvXk#Gt z&aMH=^FN{OpBC@A|H9!=M=IVJs0{Brj*bzuTFFLW+czxhW<_aZ(hUvwh zt^b7+t>$4u>q9(|Drgy9I%KuhF8Cy5>eCy`asHk0JoTy@Uh$a7h7=Cs4P#7k{_Wpj zJg0|TO*iDzR*mJ}PVtynyb%oyPD12Cdnlc+z_LmNF5&&9ARfFLj0Y?6o_-6E@KuK& zl}flvKMucq>}KPxsDqx^BF&tb7D`w7s_D9|5xkj6JE9_w-q;8fMzLSBs zC)UFJX>u%VatxV#{S>>u{q9}hoNo|VdlurUv7f~6E^5H$*;mEY9kav^gHs@!81dgf1^uBx z1wT8@#>nQ40@G;(jJB%;mu-H+?-`7hDpw&xm?Q63T*8r~%E9BCGG98e2&*4h!lxHg zK_e|i{A2QH_HB~A_)dV}IhQy{RxS&{g8E;m?>7=fZxL3VuYf&K7A$Bg;+4IzxT9bk zBy1D*mU(&5YLkeP+4aoIbUi39*o=SPd_$eR2}1t)8s0H0VDI~m;QJGr*lRhFY_ojH zM1SqkF{+h}S^t+wYxSUPz#fnbb%&^om&|= zRx?_kd=6Y&(Dz;6F`IlZm{OHX+zv}{xqCMmix;DB&rQ&Mbq;5b8QKWNjXu+Zyd$zRTFQN;;#{&b|MVlxlihcz_!tD2O z&L2E!qQ?`zx}v*~GgpsZES|M=ERLBK!q&M8UdTBI*r-+|xMi6D9}W(tzSSM5k+7Rg zFJFT?S?`&0K|gCVv>}nV?_+CH7%`oBg``Z%!QK9+VX1-)21E;nmENCG@{PKg60-fw^_%gepJ9|p|$un zCc{2SB=o3Fo{N|I&ob?W2~fC;3q4)}TXbg*$P7A$3tN03`|&BTF=!z-669!eRJv$N zlQh=UzanV@+i~D@NeA_bE8xZBov>=?JTMne$DzUYXwwyj=`OvbrQnn3>LwRr*OG{{ znkR`9eI_%rokowu%b@c16!uKM0nb#-SbC-%tacGNJ%-sJ)qfH_ zg3K{FT+mN1$??^~_dW8dB-XmC^DM)7riJw^bmCat`%nX9+P{#iS2f_6UOwJiHx{cG z)`dY4W2l^I_7X zINYkW1A;er;F@8=*;aD|k2`w+-4X?{?qU+%wE*JIsnAl(cTCOw6UhIY2R8~F$h_63 znE%a@0$W0pI#1dzo;ocRja7xSPA>~WMvO*pUq=>hzEWH{BZGzQkf3YqKBMi;Sa8bv zTrQ>P4Hg1ZLoep3{qLAa___BHUdWx!;+Mu@xcdh2qrc_?8-5LALaz7AG*!G35C|H} zf`#l)I+5#&Wn1N+;4!ZbqW9u7II920Zp$|k>Q#q_ELI7=K^;2$WiG0hoO$u0~a33;Vd>?%d*1*1HUc`os6dQZ$Qmwjgm#itaq$&(@HuU_p%V%rZX|2{XjX*1!0(cPMs+?Z@KpK4h7dH>`Wv0`U%# zOvWw^wQ30dO=?4*f#GP^&<8laN92_Jmh4fy!N$i+2>Y=saN0N!$Hy07V;6_Y^{QMf zvk(JZ!W?X;si5-EQ#dlRwd~#_NqlQoD)Led6V0!xgqw1?IOy8{W|w(HK{gOnCC|{P zpJDXh^^@3JD#L?5Y@uhKeFWokbzr<^3_jMlhw3d~;nO%5arzRWbLezE`s@o8T{c@y z69s>6gk(L*ds790onK(ul1}VA{}L1<)yb$SOVKanDon~2xNdgmK>2|yuKKVDR|Lu8 zf*}mchv*Wst@7~HZUc@DmV?P_`|Xt<){{mVKYHbM1cYZ?#xL)_k=J?*K4dAd3iJ1H z=~l7G+3_Da86xzw>&3wjy*xbBIh4u?@A2`QYKXelBtEy~e}B^_a(Vm!@jmT#e7C6% zS=d{2aMY%rFR$R4>|k6uX$uZhTnnDRM)HjNH}FyEb$FWDBO2B*fCgTCLVjL<@9=d* zn8ST={T}Hmc;rg(_sh3 z!LP0wTw=Y7{1#^1c?-|rp1+64CIf*}kv|)Y>~;uT-LL4a$!9h=yA= z$s}bJv|BY1xSJ<(nW75KaiKV5;s^1pjC@>Ya|C5$GGSw1HaXm`4%tJLVZOW!Z9K3L z_DDTOi;E}Gf0#W6;uZEisD_*>RuFc6!8mfp2sC*+k~Hr11&38Kbk9C-yl8F6Pc4(> zx7Cw~!!b8pXKTj``UNlPlb>)QItgY<#u1%kmEz^wFB91^FNC5yxU_8o^*sHCUF-;A z+ixqu@hQD<=JlTn$3qHyg6RlqvfYM0yRiYpK@xZ>)|7snb{d@;a?o{BlF$txcm-8# zMAyAj;bF%G{BfG$gM;VDS|SYtT@jy zrYA@G(1(?dT`+6eEx&w1wDfqIfuden<;gj`<*-?|6uU5NN(kH zfIr@w1o|2$$qmzS)H7a=doKNg&qKc83&m;p-03rE^SX~J^p}vW(ZtyLSvB+gf`wCgf_|L<{GdYeWk?Xu8C1sh)>Fb0{24mW9Elb0Ayr&rUHp%92exKy6nBw2aiI z(gWII-=`(yxZq(iSU(pB`5zHM&2zkA6#?(p#1npS68cP0rmc6vD+c)05&a4IwSUH@NF`4lX(s`-j(QY9tOL2uY!TAqEVrDHqpB>9^qCdSmjnT9NkYoe+t3D zof+W%D+RM|8-Uv!12kPHMji27e&Eew!PBw^509$AFE?-EBKJ*LX!%wYY4R5yU&|!- zrH51P>ZhbWKLqNeJK*nwK=#3UANnOp*}7WP?^dBxzk1_Gds`eVzmQ+{8_VX|9A!5S*0FD$ 
z&Y-L+$)}Fd5qec5z%^GD(+k`2s#P(oaxeq?t?}&9yD9i=j~Q%F7y+58$JlZ$fpz7j z#|uYSK#tHevqoNt4e2-xzbBt&tDRS2V0kWr7kv zd9M-n3{PV(9wU>E?gFE}xM6AB69{z9rQt@HIE00x^Run65cx_x8m*oqvXl5D9D%of)AGH z^Sc+%ioeNRM>_8jw(Kc@Kk2twbNGE|%9rMAWAxy)cb@20*F!clOosbgH8UFsY;KF}#YE6?mh) z*yE7~$Clm4u$jvIgyAvRQkDWkdbRizK?i-c=>#_W>VS{kO7Q{NcUaXK2|>FDfcmaO z@ZKd$4E5=7pzSoI?a-tbSFDDu5=t=8F&0<;xeVXWDY23JL$N|)1uv<41~2DSLv-j4 zwoxS+bXgkw+wy@ep5MpT?@Gp%!@F4Hj2O(i#44WtXASd8)M49GX*yZY0g_yGxZB_7 zXtnJnx*twJ?ZZ}_)GY-aaRRNj3!>Ja0{NItZDj6YM>?v*8QMBD!BXcgmh{WTE|#7%!oaML<}p0_Cn6}uWycTEA#e{at3uQ&-8JS)Ii(0J}m5%C)l zLxHDeqmL}&E496RxArzNZMPW08;(Fl^E7U_>oAX;xQq9dMdHN2mvM2N3CKCPLsFwM z{I_11&7Mn9!A$|N64QuNQ#wj0xf0zgb7;DOkZJ0B598kn9*jZv@xtJ5uuBVJ-?$r~ z=Cv8O{3^szF2PK%K$6dYq|E|yU*Nmzqe0bFo-aOnkqr$kVZ#h3pjA*W^Iga=<%tm+ z0>jB)!S8fR!UDrov~hdAG#}F%DcX~&&DjxsvQ^pxTVD_3n`>8~bHQ#=_aJL>w$)I) zd&(5tyuaPS$9yhq=ugCXyUxR)-_dZsCJ0Z4l^F+3psK* z8a7Ml)g3q$Uq-!SUAykGFKw>)?ywCw^=67Z4ZOkjUV_-QOrH5H35B;bi2N`eg}zey|wd`Wu3L?|k0khl5oLH`ggN->X~H94M;vuMgN+n4?%#>i z(5!eSdQ3^cltcEoV&7%l(PzL1CY}K2n=!)`2`Wg?mLx1dk`BQpqigI)$#P{VdIOfVeA zGv)3uqr4g>^&^)wxQC(Hwpr-EARN+u2cbtwC!CZRFCOMyD{5fsJZ-d)C4ZHPHjl?a z@R)o8ZNcAh7L7T;f_2TF7X76@HfW;Up?8q?(aCAOeAHFf)~%L9&g2TV)VbAIJy5g z_BJknXBuUUIjQj0?g%(`^#R%BDn*MjkF!y?X5i5?Z<(n094@`zE$9SRpzv8lD&O}I z6Q@J?$u=1;N(RFb(Iw3C{(|8r1P)z+GSA$i!w$@Q%f|O>&;cr2uzBQOk|P_3x1$E| z_vxDjUiENH=^qjwwacK8l(mqm2 zJW0?B-?%IDSED*{%KDk0p_KvqO5|ar(OTBATIjd^l8*OYWZ>wo>9A8T6w~)zBx!{U z(Ku-_+3|TG_^q5NHtFjkzv^Z~naNYMN)^~{?c3SKS(8cqVpVFf;U@S6OvmuY9K7@P z;Co3+=K89ht>3c}Ocs7bSEmQKR?!2*NwyWolx6wsY07XSE}R@XAb81jb@7Nwta$A7 zPh#(j8)1#}O>hK)>kLo9Jha)WXwjHxG*a}vKbKuqf70}xqg{pKj?lincl$)u@o2~$# zKXXOLChWnKA!k@r+ckLnpb|9hC%}LFoTvC=+bdFNEO6rnzD1;0;~91vei3=s>GSw2>J{48$B`~mhD(Pvg5tAAha8*nIB1v93z~ig zcE4W@iC-F+W|e|S>dhsraZ-VT+Q~xRr3xc5zKH*Zb>llTOQ>w^Ao<=pEZAEbTb>I0 z!~xAD&|y3V$;^hUu4>e7MiyQVQh-^)eM_^_hgN?0PO?IaQNPET*Q{NJ!vBW;u(Lt0 zm0cvUb1i=U)8`X7=x=eqsq+RBXe2 z52*8f!vMC``eH+OJ$lN=vtPFfEv?mnWfdP$?zoV>h)9F)u{)snZ6GSQPZb&U^s$|H z11o0rS<&cq45yzlf;G!!A)>>Io+AvlN38~nL?xmvPn1Qo`-myg>5VGEm)KhcOZMSg^VR+znLuqP}`O zeKnO$*7L;fL{%cOM2inzuRsrJYjd?tLtxti;SN_IXzoh?F_%AguvY#spDkor27QQu z0}1DF>ODVv8qfuxdgO+2X2(hBvRV zLF2BIg?lD2i#@?4ZA}?O1+0ZZ$GdW;nwK zHhPHADdi=^A|S>nwTw zJ{HTpj^N4%ZBQ)mhI?0glGBq$0SzfY>DmprtVR#aZtBy&^8dlo8(rACXE5LR?<~ZI zkAuwh($q3-8n)SnKyW9z>DK*Vd=f% zYgkH;COXo|)>6E3&m_^G#|k`g*e6l4^*e}s{+Sbj-ob--+i9!wBAr$;9ZqEA-* zbeO$c8rB@P!o{C|VeZ{M*gQXhNP0W)DfeYT$M`n+qQ4DZURwe#PEN$(Oca_P8%q;L z65=t#o2I5#vvRpwarmaiP_HzeTWa0L_VTlkIP*HTs!!&J!o9jQ zd0EdFFqsU|Y;a`ujx~-8lUboc{X{`=YP0OM8Xc?BFpR z-0_&Kzy;q#I0@KhA)jD8OOY zk3hp)ANQLCCyPe);dQbLpQI!~!^L*A-L`<_k1oPzqlNp4 ztU3LD+ZV9$9DDLZ0j?ZBNW5z6;lPR}QY;_j5I1oOObA_%KXl_^)ar})UG)qxRn&x< z@xt@huEI|)P~_5Ue}VYmYoc9~LJ~GkLzw}#u(Wa*yELX->>ynUe#sMIO{W^a^=CMq zKKmJW9G%A#56a=0mKt(l+GczsmyQ_@^|+(*I+iun;I-l=)S7aMtmqp`AFc3bzLr)r zP_hWB@3!OEd&dcRX@nE6%;KoM2!_otKn0UPI2Qc`yAM8LLsJxSsM}{8>GF@|Jk7=J z*$t4ce*{&I4dtK{&uI} zjrpmD_a+^KuGk^)qazX4+cu!m+n3^*&K*x)=llCd3PlT@|`~jSDCqoJ?G1Ov71% zqC_*FK*oz*xNrFZ@;R#q&3s3)G3U=gb-odAbrsx@BTV@wL9xr%O9!iM%TeE;08ixp zL!H8hV3FAjXJkXM{o@%peMp7gy)~ar_0^=Y@z1fzUyhdwDF>B4eLT|B#r}zsa879y z{`+)*gmaxXFF4}}uhsTi8 zgcjyDjl=p0YHTZ>g(^M9!oDAZ(6KhWCSnC1-kd^a_W5A=_SazfJp{Y`2;OiKu8*(9 z=&?Kk1BFWY()2UdgHFc9;g_t@JTW$=d zC;Ts=^dn_H@bX2ms_sU}UAJ5mtTcoc>sFA*@10ppP6(~4cmTPrDR6&95RT8y!-l92 z*zQyStv@vB;^Z`#GiopTU%vYsU%6$iV`Z(J;2f+oAdUSdB7fnp|+exqB!RcEbIWI(50CjsN7H6y8C!0bVIa?pl`q3N!4`qt2ZvbgM;RO z#a&;ztETA8#hBl5{H@L`fcUp`%_|o+^=vjgy`?~}(Imd7xj`6@)#A1f8<^9rR_1#; z9dn#wvCL?MwQth`(z$C4xE(n}N47=bqS3Z|(+Dp(aqS=*c%+Vc%>Iv#+qsro@6HGN 
z3;tqFrMcjFB$Dr!Z-9fMbn(*e7o=w5V0u__I{Wpdp6$7@T*wTcqZ60hr5mG!qZHAm zwkq3b*>^R3-?W>uFBN>y<_@uyh7dcwpv_~B6tVQs>GVOzb{ets4t$?&Oco`5Vf9Z6 zxZ&g~*sXU67DQ~O*Mu5LUV#Jm{dAv>)zYwj`t>rDHVTpKdy-V^j5Sru*Q_@D5KEP- zb5PPnL_FGt(D$luaKy(nZeREq!}||mz%_mADbhWBg~>!-zsQ#ElMWR3gl(lxx|LL- z;Sfk>HS<>*2e`p?Wg0WB5+=_24=tw1P?^qV=x`T}Nrr zwOG*&>Bm9W(hf@}nb5A@{;FI3_eBwUHC8brHG~m|9*uaS!k^(d^u9P%B$4v~vS)vQ zRbkiRzpf;ReU0Kr#DNRHJBH9ZC_QxqPkU`mWBERu)H0IjWhF58)+wMHIG>Jtp9Kq2 z3-RjMd?=5!$LXzuVX#LW{OEfR1?8&rao|hH{UIo_?qrcfIuRdiY!YquoWZr1mtv>w zCEPaG8n?WQimaQCgT_X~)~&XDU856D zSzfS zKZ7B-V$2RmOd7?5g|)nI^JKVJaRJi(^O@9kX?oY78R)4s`v zmoH%%*6OS^Vie#HDf-?ZNBmVull!ihp{<)w;k&~};Es$7o4a}rgp)kdayA{lo$_VJ zr^{gX7etkSHN@y?6pprd1IBlf@#2v%46DpUy@Ov_)x`U#JwqMdcFv*4b6>(HmCvy1 z%X~W2rHiEn+JS@J5Njr2ruTTLes+wku4x6mUj#|6 zK0DGTsEuxknW6s*I&`rm9@rva6rJU%c3%$mzuLmi2h}rM7e#1k`vK>^_2KKE2YHXJ zDJB~l!kT^kaP-#?n-)+MSRzRVTJirK57+V5X%hG>z54;IW6uBus(fLe-;o;S#7@B*_O> zoq>TalLf6w5zQ;V&YDAmsAr2M<;w5a{H@LC;yw^m7Jnl*7w;4q+!S+Bc@l0nYa&}4 z2JpU5L+Hk06VZ7)C+=~r1678@IpAxCdw%O>KyMh#L z3#ZBx?}2UE5bD#JiB0YD{JBC4oJ{FJxt_6XOXNmevs{tNuRSI@uF+3c`VHeMo)^GC z#|~UQji}Yh{~-0GEdR1n)k@{Sdj8SiC|r0i$LC5vtlHEymYs}~#U|%=qI7T=tV-2p zA1&%ho8CJn-!~AN7I!dRhmEji`Fm(K3S~9+19@D94e0)A!1!yK>}i|^bRY2GW7c)D zOA6(9S>ip^?%c@Mq9RPUmEf&MqA;%~(&|-&CUjY!q}iEDe29iK25o91FT+N_lI5l} zXk8gKyZPj%x?lICx^d``?_Vyu}0&nLRm zb0gRA_5*YAXvcPuuj6q3ancR$(dz>GfeWf$DJ-X!uhZzAY5sir=Uj2eQ6UoFAmSFy zA$-}e>{CiAI-Up71bx0_CN*j@blrWRDt_%j6~A54V6ZfiF4Ne z#&=^LlDES8dbndf`L^LMEWdXbmTh^8hJ%Li%R3&CbNRQ~yW^`c-$j;q>m*}P)g-X% z3#&3xlP2+l2;%Ia80M(U-7>u4?$|QeZ!wxH%w-}rEdYvCw@~#L`#^ijFdSJPPbF@x zrd^}`sB73bF7-2z&VR5CWh^pDt8p;6WR>FQxL8QAQ^2D8_rY(uHEkMG3co&0rH{ux zff?zKQ2TZz7R~#Dx7)|__4Z-p`+hUNYrQ^gDA0$ZSy8z5?IF;(s73NScB0%)1^y>* zGM&=*AB>G3&Re5G@Tlel?%Hcf`h=s5yXy)ZH(ZB4++xmG%&5lI|HNqRuZkwWEV`Kvl4}B>b@87AW4?|)k%T9>L20Di=FiI6Inj7U6bBhbrC*} zD2I9Z+4SPouV^oGgd7VKaY>WWbXBB)=B(Go_N38N&R&vQUzx@8uliDJJxi+Yl1ZMI zSJRArd#S9`8Tci8zmnW`!(1EQKuee1%h5J?WL=I_A1L z1(KeOgn9wpv!Oc^9TiR3^+-k9@cWWgWxO0O8fZyh$5>(Edk2<3WfII;HdCa+I%obz{eR7edR(3V5s@ z1AiwSf(JWpfvM;gdX3g0t$hYC!8Q@P3r@qjG+TJH{1eoCybZ2-+wsy>AKret5JmS+ zLxFW|m1bcib^lv}S*rs{(Qzr7b!GrnIo^W1GB~bHc}vEk86`tj(*Pm%`gVUVN?U#a zD*p_fr~D*YolnS;Yl2#L`w;;XDMyW~UxJyeJb`yX^!VZT5IQ*q{R1OKyf*{hEQ$hk z{c5Y%OK0=BLKL-6RnV^G8)90rJzYCnnK#N!=97=s;zZ|kn7va=e5N~*M7{EbQB5jT za*6|Vuh1km2h8!4_A*fUp+!BXiNIvLE^c*Lx9+^LcrgVzYee)#r|1^g*J~&GP9tJ}E z`8#Aq%u2DxQ$;!_*ODLacn!B6T_eLW10zP+@s@j+Az5-0n_(4Bb&d>YOP5E&fc^|x z7+8*DI}<>1w-m2AVZ}Vgn9*dzBzmz!3MBXMr5h91h@ZyYVWpGz^N)q`Y>!o|fK(RY zk`@Oz^a`9Kbj}h>Mck&xJ|Q#dqGp+$)b4i_ks@0 zJ|O&7GrT}1TzGzbLU`7LCv4ZzOAsKSB?cyWQ?EJeVOj200be~9SO1K~w+F}YG9gCv zmTA#J&&;4EcrCi^EkomqOBh33`2d-d5H({CHytTY*SdcuX;0E|)`xg@XXXPWYy`aM zjH5b==P=lRqYzD3r!!`ZWK;h<6MJRZ@ta?TXJ+JeIB$3jwBJrZ=TXHVw{9v89#{gp zF70R*GlrJG*+N(7D?q=D3S8NvPkd%;((V4g@Pg4N5xeP#-%szxhwo?75CeN0yi}cE z$xS1o&23oRzkrOrx}K`7&xYM43;F&}1DG*(VXEf}oKW`;>N2W`@s#DUvUE{1bfoP&F_ACs3q--73VRxEve8l*%Vfk1x`tW-M5F8wiv>Wl+e zc)O0xU*^uAmucX!muE=ZwFvsPBOU|dd__~Mm0{k68n)};9oY0}JT41h#4A)4N>7aj zvxRq{Vc86N{dps-2v3Eq2vt5z$DY5R>5G{fI{f`qHLBU8OGV~$>FC+I7;X~IiG245zpXG*$7bDFqjtj z{bf(YN8sRQW&US~5ZPWm6?Y~K0q2_Y?0Lv>bO~)kyX{phCiJpMx7rs?vO4fape5@) ze}L%f>EMA^acIcW$)la&EGD-U&GecuT|$Azc-w;SvO85@GOci4?+n~*+Ki3wZn2$f z%fvGWSU|Mt4RCwA5ekijtG|@@xMx}#N=$o#k(&Ro%>OjpQoaN#9utVIaS|Tz$^!4} zr^u3ENA!r1g|CqkaAe(ayeJ?%YC~Hg%Nb77wGpu1hv>}o2Gnyo3iljcao58T8ZsMk zzi2PrS(ZRu=M10&r+EnxkBN{|Zi-X>oJGec0ve<99NNzb#5s?&c-`0GoChA~E{`|T zrKj(Mr{fpsQ7uB;5ju!kdgRfs&g4~fC#fFhcCX*GKoRtdVl{}AcH z`LO>`fXG-npSMH@UaN3HE*+YWnyTAD3C@v@8usLRr=hbGa(&3d9a 
z150rB$|L;6b1VKLtripmKZ(C)NYNtU+UU{VP%<&<8~(DG#gh`|Q?Guj1~Q^dJ2?-l&sf2m8Wu1oiw9t7Qa_7!48c|5O$oR4+oEAE9{Rxt!qk-AUPE9Z)K;=Hn(yQz_qL{P&Pr*sLLnp65XMG;q~(2G_|^% z-;FWk)uGn}rBe`^=ij9vPb0a%^%OdN3*sFQH9luhA@km~nO1J|KvVM*cw=ZThII_% zqh2kb`~U8ut?eyV-UTUqUa&KKS1y9pe)fFG`f#di+Q!V80fz=VI`zmwXgiVvX@17E z13|?CV6~yr_{% z)y$OeikCV~^4>@@ZofoEb?IT#!*pTWZ}!srG|v3DlDmIQr;>3$z$!qW2kTbRAea5L zp;V2o+OU$kmc{e&UjN{GMI6Z6Y@|B|3ja?y0`_NT5yfCf5-%e|f8NN1k~kT0U$YYi zM_bWflQZaq7rOlI)^;3pd=kA#D(TKmQ$c%|JzCX;friTjaMl^eW-PM?m;Ii?dWzuY z(qUx#)>LX27Kb;ZeR#8pBww%n7FBl$n`GWFc-eXeqSwD<)?bZyuw}craoT$@uQ7tf zEf4Uy`FJ`+eJ=2Y&9GHQ7j`uNA<;|npr|-fymU+iEbo#B7<>k{*~g&ynB5SzMHXepWyE8BUrw0|bL`Bmdy zeF-iRCsaWc^@)?7I*yDe!>M+&1RRDWz27c}^X!JeqKGkYqthPO_~nQVonnxySz%6_SW?+7?%Z?! zC0J`%gdcCW<9sC>9v?k`q)MEFzJ#X`b4rI7NJ>!E9k#+bUj_?Q?dZxpe`b2|4w)r4 z9OhL_WW9olChIdob@WIgZaHJMeR8w#9-;=9oDT`;sSBcO&U0Ab@y)#3b0lAX#h7h) zqsrD^T?89q+lg=y0FL{d@X1?G=twli@kxi-m5VE|MWPJZsz%7TXbz8!-ZJ;qszm$E zQ+OoQ?RN;(`GafZu|T>QUYY%6`cZR0|IIsib*qQytfZABH#W0 zVfsM}TvxlCoDQCg&L<2YsY3(0T9a`9)3!GFl6Fzul0|)kh#$zV|SX_gs?^TIt&#@c$>*bcrO(##}35`K@- z!7vYQwf1%}-sn0;diJQ$){|u*HFYo@a$TK&n*9wlZv>&7>T`?~qGweXOu#~|#Om{o z8v=szg-A-GP|(pLxs{m<$CsXG_H*RG)4>$%bYsOb+ec!X#$Wuh`WUQxk;Uln@2GQS zKaSL!h39XyL*cM-u+&EhLq1-`QToyXQh@_1=@)L z!pwIa+E}Y{H|;P?U;ms8D9nZL)fU9MFc@o7Z-MTdjiN_CuaZt%T}+pKkAYj4pc>r` zwe77?~$7&08~&9qh=@yJ)mrmKJWkgjJO$ zsH}VveJ=_~&MD_1f2blvU8;bIe-y#L{4)A=sp7d8LEo_zMG5^p;j3>t52;H>fDD%?07G7<#To31JycvOf@-b`TL<1fQsj|f;ha1dN5 zC`MaDv^vu*M1oV+fYCBzQGt6U{1evGCbwyHz3wbhAl%pQj(bOn*->Vq(dDn=0Fl_c1Ql7{4`ru3`%fc>BUke7!LOI$|90 z-iiUVY2F6hyYmfuKkY72{&86Z2ErV-Sp(GC(jd~XQW*Qr$JhR$_+*tnjMBf#GL19Q zxJLsUjwKkT@F$!bYAqMnx!&s+AB=*GThvuko`O|KU; z>R&>u*HS~BZco;?M92OUkPV`N|N}iif?SFhjzsWq(9V}%#Bf^7X5Q@c9?>w z_MId?GGMePwwd0GQ^NNpQ|P~lYdj)mBzAa5;7J7$y&aka*C$5cX}{yRa{El{Md!V# zla41ku_Lr1y_g2wxlCN!PB7z()>!T(iT;t!tSEaQ?n^1aLGQ9K`KSt&Q{D|{hrfVT z<2^8^=MkCVDar5G_QSSahoE>%0G(0k#b0S2qsISau{v%F|FLQ*C>^(fTl@N;{{CKK z(f*H*kY{}LaXtEngisB`6IfcVLfiIuVMuxj{cfj^G7pdPQqxHM*FTHbZy0I4(jkc7 zh<}E+N<8@Fovz&0U>*Hr;=%@4JjUcuA6iwoiw-^MMF)OA4)ea&z!A?n>Q@nf9Z9vq zzTn6e{r`z$@=R&OisLly(ItEpyn=q*-3r+;@4GQIZ)CTBaM zzEM9_I{KL8yGc>Uv0_lD^y9e!vCPq~NId<1BsqPc7Bjmwz%}?ee|YyeHSkeDcexwz z?UR5YwKSqZQbOgzDM^$Q-bA&>ABMTXQfyg%5!t?_4_>T(0tYsXVRvUW0Oq`{{1={s z7h@mejT46aDbvFL+LklB`rGWU^;p=sJ%=pIzCb`#4#rosi*4Kg;`N6w;jh9$^7pS9 zCJo!eB(7eEIEPhmIBSQn@Flr+^IHSwHTo^h}QnX{w4d|H_FE%iHj-!0{Fs<_vJl!Qz^ggZte(LN;&sUD<`)LqY zbIitF77b{SeT$rUWP-IBcdb6e*1-OP3K+8fCMIrKMiQn+lc??#SR)yUqKnqB#kT>J zom;>)S%Qle=i|)fA?!?!fYGRW&Q6(J!AX-|i)Z98w(9dHZ1t9aI6D`3Aw7`RI{@kV zYK1+xi?t=m6L%qs`tQX!0k=JZFZ#O{rmL%v2b&1dd!|kwu_bu+<8w#|iG%y(FnxN^ zjmPVE!G)R>xU}{R*8B;jX%oM|u>w6lINBXgJy`&Cp~WQ8cK~nx>&e#@chN$l!RVNK znhth#gg_Gr2*UFmxLNv$13v2rpKZ2^+>vYQW!?Ffpq@x zqijNt7XJ`*4QC|&z^slA{SaW-x*e zV#@{eTIA0#8Zf#SSNLesDO2LG2|IZf@=6o76=A<=2>M( zPYvq>F3|d46y&yh!W*un7pM$`>1U#vdjKOyr!F6sY8NN-PmR{514^I?hyr~A) zK4F07r*A^XIe(n4>dtDG`Ve1}Qgr%079wW}F-K1WoFYypBfWD$^J5Y|zdVe`4$|Y* zvCr`3sehudm@UL?K{gm<7h=?Y3+PlCEM$)~xSgsBX(t!i?6PZUC4}$vyM##R+okw< zVmu`J9w#Xt7r?(X0X^0wl7XtX$@BTU#l52x>CI=7eDI${^or8s@k_Vk#ezD~HJXUF z^V7h4z8IgXkH#@scS!FJ8Ezb+fYv8NaK+_h9I|OX`k4#Q^50tAnLGp6JywJpfjV44 zu^LuO)FX*d=7+~@!{AH@X1-4qq}KU}T8)mvN@2DlVKoU4^_P>IbMx@|qs=(UWeeN* z-e2^&BMcRu>}G>IBQWLVKCoUHhp}Bf%=pz>kkfT8uyxN)kHh;@5EpHVc zuMCI0hmTaFXmm>kE4V;g>@&6K>4?mf)e%v1gi|=!+#%#nzG02 zzU6MBd7%XUyUg)t<+wpCi00-3|era%}%E1PxG~%pyiHtRF zSh5go>g;h<&m|0VpG>bDzt5C*8PnAsQC!#aCTNb{jy;|lIBm5SZF6%+(^G@sp~iKP zu-J?0*Mfm%Xk+LCZEh>Cj8^`d)GgJNdkK0+%YbTj%k?mJN@hD~At%PhfZG1@`V$DH*IU zgOvj+;OyaKNK7b$P3H2{V0S+{RhN!r9Y+C|G}9j^5e4 
z2+pTWAf-hs(b?+<3?1C>U;tf5>coRgtmxmp3_T>h z@Lks^!p@H61J~_jUwYP}oNO`~e^!buj-OYZi_}1$`yDw%;9hXi4zu=m|J!*Id{r)Y8*-OW=!Php}p&42r@v>7t08 zFwf~8_p zAYhAv@2Szj^S_G9&6MdHZNTmJNn}-VpZM+W9v?bbwYVeoeh8Jo~Ho=q6gd zaDk-tMAuwMuhg{OtNIo!6zVa>Cy%CE`tiT{gZYs6PsHaoI#BZPC|*q4geO;4;;zrJ z?6E?ac-OyBNWO8O9abskmBn(@^3QHA*)$C|ZORoT{gL32%SLfaw|VfkF^wM0SqR&9 zSXVC<@~QRvt z(T|DSn1-D(Jv4!XxBKYo;NLsxvFL2!4uZjrHZSa3qhvj?(G72%JI4#oWwSen%fLA& z7XC%w!IoFQVfY0N=*?@S0lvt6?Ns<*vz^%cY!t2Ck`4Ptnee;g=LlJebKorPD!MeY z6biB{S!Cf;afD7ZyjvQA7mB9fy$mG`do+l<^qOJ+si~&Of{GQ0;lpe31%UY@S<960&EK za^XaK&?B~G=0O^|Uzck;+=4wOIn!!7KR~R)8fafFMQyxZAW&m2CKxTCV}jF{__c!QM=D?=@HN=vb`7BrI44O#k!n(r0 zWYvKOxX*D2w9UWFCZj5jZ;XI}YX^(Q?V1U0hW#*_0EADOj5_%_xaYN$fY8dslVe?B z-DFJ~dL$1`q+ejoBHhaIsu8d-cM9~jSHju|LBsfOE!ga>L`?yQFSB|Si&dWhab5%iF@axuW+9q zEfLKB8XHor5Mx~2na^fNEC9;@F>H|ag0)Aw$n-OE^u-Mm+P>~C`uZjRK9cYQmUd9(K6738injk@nJofydoY?y!!-i z8WXWPCKOx9KzP;M1+C+?Fl6B(I`?-mHvic!TKL78r>FIaUu~L#;;DoAz#196+5Ll* zyp^V^=Ne#xy)`|Ur_PmH%=v^6b$aKl4b3Qc%|5FcV3KDpds`BWzOAG9_7j4-x-|AEQE@nBQ>g2Z$_6wUgX02a5);I)blpJew+Kw=9Bg>OTt zY1n1afP-gfRek~5XnXOSr;M;bW&(OdYH;%18zLj+Y!tM z+uVdE(Q$FOjv`gwtAkHU!|`&lElWH7i&(m3a!cd)IPQ)dA7S%|s3%5I+oyWGyg`Fz z$9=;SZ>Mt$rUK)dRj9?LX|(a-El_c)!V9h0Y=FjIs7;Ze2E&#Bh(F@r+5i2W$qesbjY7#$SS={j%N|PI9!$f-6w^O6YgR5 zf5+JOk}p`CDF^fZ3RPQ~G^=)gtIvI6Wbo6Eqx`k~X7;|R5L;|^(c-ML zBsRsAyS|p=V~y>(>)9f>xTKbpj=MlFRf>4kWkX2Kf6szVB&s*s&&Hz5!FeX*Q|y1pL<{lofYf$JVvK`LOOYJleaNS{@onJ?cxzhNHoFw;>pwT3oal&9vkR=m~0h$VSvLUdylI4zf_w^;)owbEz3b7hIkw&RdJ z|39*ILkguG>a_n+J=NbChFuEDJpJ~6_~>~$*Zh@F`N&+J=I)IHxH)!BTu9YZ7xIlK zETQjZGri&D!)AR|q7&Yx6J4bb)JOUkm_L|K*ZcO0mggslTRaZYTR$~9Jj+0F1 zX$9ovu3~;w^D=rZ4W;{DE`tdp&ye}DS46Vr2lz0L1Ysg$L%~&=Us3&p`V-cH`L;Qr zz3eKCjb*TPT?^dy$fR?YbctO{&$FDn8W=rFnJRwRNe})q<7dA;$C|(_vPoJOM{6Bm zU)Hw57x$sm2b1yN?>bWTaWp^aV?nm~t%bv;^7P1`kAliTLHx)QiEd1fIC1q`l(CVg zU)FA-3msD-!>0%aS`6g{BiRsA+kdn7QgX9>HM*nxnUEmnfMWZ75+u9KUuI;<0l6B_OhxogOuQ4u*yh+A4`h>N(|8R zZ5|(;HJi7sC8#y{IjIP;r@OXSQ~USF=$l2Oxk^_YUr~C69-cV`M~$t6i{FFbUT6uM z;Hb*X3}=$$(QAZ#x)|cd-GrDwohUkcR~&kL9G=)#0Wr7%_SwA0spAsaz%k~0(aA)1 zRA~iW`sxuI6B;aba7n<&vlRG-efpfZ#n7$)<)QT&b1-VmA=APX@WNtwK7Ywg^7-Rd za;Zv&-h5#L;ghZjd)NlteC#&PeWgmzs%G)=PYF+Ja-tcrVmKNuK?kpxLenPyVP~d& z#IN}_bgC#AreBy3RxY>LhP)wsz{n&FdY^37^v4s`>Q-U#lX$*n)EPLnbOXSP!8Gyg zGJb+u(wUu~pdo1*b=!ZCeoqhJ&GS3qV0SiObLJ3#w|p4L%TDLLqb=~#=s#5=?bpmb zz?fdi*u=L>|AT;2@(|UU&a*qhVB821-}OS1{vMG8CmyBZ_kn@*X{kLgHFSevf7L3p zl7sM!qXDmLYKF8~NjSBl4*k1l!6e}y^)>Fo;F^Aej5{Ru`C$^O9t?_*#UUt z#SQ#2FPy*Cn2kvV)*_t|@37GM0^Gb_gwj1FOxIQqPk)(1{oPi=mg~D%q4IL*nRE-& zF&SpRIEhLLqePB`tDKL$KZ*EG^AU}e7YHHGsz&}+%cjl1< zM|F_bd&Y9|A7Gi)6h3|73$gF6OZY7|6zpx)#IE-$$ceO}%xujLnrI}4x>MwtvFr_) z9+=DmxBkO%{f|-c>|}1D>Vd&Kzrg|b737S3Q|6k;|xOm^XAC#@^Y% z#~qQS2ait#WkJVI0v@w?p>q2Z&fNWKmLK`GrkT z;F}HPw-U{IFq(JmcBZX*wjfiwnT^kU!^(8z(X{F?9K37AMpX=<$qw5{_V9@U=KC#< zkoboF118ZCjzT=jTL4(mff*;02Cs~V!P;>{0S+A2f) zb3@58>C@usdotqSKaDU#kaSjL5W!t9`s-2W(rk;4uup3~a>i|W6}aq*9+fa3Mz+S?X8E!Yn91i_QXQ@hPVe@yeM^VH zrX|mC*_T6bAXpFO&NRX2;zm(Id@e3jXcT!zGzjX1F(f$2UF4}Tga;dqWKWax@V-|h z#)o;pDOn*~=wA(nhB90i1@yo5Z*Zyi!JR1+=;{}C%s+N5Y^rS|iDok53%2is>~4ou zUSc%4CS(DlEhpm9T!U;q3L+ z6v$C_f^6q;wD-A)UUrHk4|aFsto=2jbFWqC`Uho9QQilShMLf|BVF)DW*w}oQs$~g zkJ0qUXV^OG0jOI>(C-T)nAqnJnIT;Pm)8uZOT*)FY>99*Z6kP16dt;FzjMta7^+rGukzvPl9|jZi?>h@tRsL>Dp5Q-X{y?o>5PKybBp zz=-rERFJ^Yq&eo`%>;DwqAcPqnFJ~?qsY)kGoJfVm4B`IgvWYPK|%UI2v0o(k1Z^) zyWELO{@za)D5&$T4>_1O33y4lRKSg5s&^;=>)LjJWN#(V?^>`qu`_xHtvq|GcJN?}FjMEutTz>(aE&sBSBuso<=P{E|kl`cJ9+Nkva@5UU zi9V7zEOJz?zz4bxZ2co4a}ZMtR+>??@B4b#)^(Cq8YrT1MgycuJp(Q2V~|w%2PW(8 zz&RBoS^Pl*I(*m+NWNH$LrtGlb=zmLd#{B#qx(?tor7jLG^EICXUT6AsRaY+zT^bB6 
zw!kTw7?5A;R&`Hq7(C4U$Vxn(kn+^+IOh3anDn{_4+WM9`3zHhW@C#R1~ouQRynGe zyJPXlmE>k<3Y+#}G}dl*#7n3CL&f#7V#h0jI=V0cuF2d-J(~qsv^bXqJqaU+<|)wV zH@@ufc@tjGYH{+X4J6~WaA$dMFHRU(jB>>z1gUZ)Mo${b`b!RrFRV?)k9Q}d={gQS zik${M%&P6^`+1oY}k)M*Ms5Wpwm;&Jx~8vdv!;g!@NB6iQLNJa0FqO>`mC zM_QrHm(57Jp0LpDQK;@@j{hoGqN`1-sL?tdg32twF}(+7<~0I;dkF8`J`3YERbu!J zP5vZjKdICF2Ens9Ir~tRDvUW#U9aB7&of;3>{a_{{bng#Js^?AA6B6=Oq#H1y(WF< zkdAsUXL8%{47%~(gQ|UTE2u{94b*=oMdR-#(J{I$#O7`&UKyuM&up^8QCXA8>4mGQ zR8u_9*>6twX2>CIc#B&|3}l_jp1f6s-n%QSwDf;Lw~v?VoYzy5<-DaV-TJboyPPHlN3WRvBn$Ldq!`P;r#8Ktd9?-SRU>(Wkm zlN1iS1|OtNKl|YGW&}ecvTzhne%X!BOOD{8)N24I)#>sw;hlQ?VnI7H zid*lyPm(-Uxn#v7sEK)w@nbGA$2N75`;<%Y!ubZtm^+AmUU*u3+HxoU$Tq{T#kp|g zbrjyb8UmgFGjtw)IlW;RZ|^~g_9UVdQF_n)q-11E$cW5HB4m&D(4?uQ(m+ZQjrX}9 zN+~3yGD@VPLPBK4?|1%$^ZA@}o^fB-_j|ht3)VJbM|~z}e3%9=GIDM9Dpi1RNd~t$ zGL2rMZus|)xv)#jo#Q6~@I9^;f2%&@I;S6m`9DYD%W);Jspk)Cl?)6WVNI-M~8#zv@o$E(*XWzc8zdMxEYTBE(qkdn#en>)d2S=1 zi}3}QjIpVX&{nR5GBLhDOM=jHy%u{(0lt0}Wu;)1rm7TtkZ}bLDvCk?d8>=hvvU+$r4mX(N_Y`@x*i+8~r`!h_MVD0l3gV8s(p z%o~oOcZZd^>pU9ie2qH?Io;H(bt&bB#zLZPGX$;tAher6@MrKCUO^d*lc(5XblF9^ zF3^>e+mS0gYP}1U|4hU^9gTE}lQ~!nTmlN|s3wzy>5_?D$+JsBhmv_{<82jTRl^6V@VP9%cey9gzsVuDDwFG0JmHT6? zB%Jy347|Nt!Ofr71ahWD^iydxynnw)XdE7G^U%dr@UQa){yx12^ZQ4j9?igsSy8+K zF^1ZNYjS>{4LA{%P8$173syHr2&d~@;zZVsCix8)@iEL1v}?t}_BKsc|y04`5|LSJ8yAg8_FbK7qhLM@-AjT5ec?X6sNp)QL(rvi_y7U5ZmiO`tZK{YSQlCv8fKsv=AzpQ?S_}mws zB}CxdGiC(-29hhgs~8e5K0 zO1XmTqwP4gqF@Z@$PJh-mPfmQQ4FJxxt#2htyU4EZ2~CTp09q8a-z4 zgKk;6lCzxrkbVpwhBpZ-(V;ehKAZZ6TN06gR|0dn2g^4?t+gWR#x5oZ^7ll0~%I4(F9i$-a(5c~5Lb@UXSUG|mkude{( zJ<_agMi!cd`~yK`Fvy&@gy7?2e-e6$-x$ zhv+PW9;|*o1t1}c$GU~UyC0487V+YHczPJ{KAyfP5y$ri3*bVLGkmiDCg>~8LF2kO z!Ozqn=$Vy^>s+^^$F!T&?5;meR`aCQGV7tMx`^v8s0F(OJ}bNGC%0O9fHRR1g~2DA z(6XdWxc$uw;R5Y5;4kchvbT33k6%mOyjSD;|JHCqTaPF`o3~%`;aH)Ap`w#EH7x4+)22no5o5$nb?_5FCuf^QV zkbJ?3p|$jMqdr+yQwOg->Tr~s4MhDX%G!>Jg5$qdJZW+srU&MNp|K%8I4#9iG!@~I z?Afq#lPH@vm`8iWesa|+H}H_LJh?7b10A79Nuu#M@=-6IJE9T9w6aWK$;Rh+Zcv_# z(iz}Y$)~An!)}{+RbYakCJf}^Yc;?y5^X~C%|?sIu0Idj{H46OTtw=brm?D;@) z^|n3J=wC!PZ);(5Z`whooF`dTT+DWK3Sd{lVv_9Die<^?aC6yTsK~8C-&I9yZ_Iqs z-s=L)|1!7EV?9;9$*YWXQ=n}^82Rizjokh^pB+@LgLIXxv_g3iBue@^N8Z1UB@=fCro-`iI%8%R@>I|sBkU=Ns zG`)p3^L^sqtL?b|yh4z3;xmvZPgA`J8S=!q6@*p^u>4RFPT&24&Qv?YXa6#r!qo1I5s$S7aCUDLte0wn=TX1mNWTS$UcX!6xArS`e24^<I7N!hoAjq*V9Qxz|F-}>-<(G7Hr=9{Hxl6W z&MFSa1VLEeTF6Z(#y_@0x4 zp`<|O+qS}?@#k>m(}$pZJ`;Q9J?A=a29pZX4W*myq2xA4dbJgBlY&0TDTdztZy%xv>&V#-ff6)DpXS+@QD!4c04_G`51lu+1*gdsM-0Z0c0-6FRZ{$L3)DdRu z%@O7Q`fyY6O{j>U&cqJZg0`***l_khmugl0=mtd3!G8T$Uj9wzH_5=UM{e2ch}&TcP2L z$9PsSne5-OkF>{Yl0&iqxK=2_jt&Qqk;Y z`gcYUany1obF2Rfd$V-dglpUJ!|JJ|Ro#<{tkD49{x$G;84q(T42M%s&yfE2bJ%9j zgDhL^GK%#m(!#BSD8H-+8crS|ezta`%f^lMdL_Yy3D1OjhCk>{pJLqd;S!FTd=j0; zMu4yTGn(Be1NRJd$^6DL_BY+ZA2~B2Oz7V0C>NDz6|CbB- z>cv*7^WCcQDNLiY1M2NuxUr(++2cJ%6Sre-C!S0Er?Op5VL+grHS zUrUW}5}wbEXL@O?iO1i?#G=CiAMV?ZoA^7$-q;x7??F{&nV=3*i!;H(XCm09hQMo6 zCDw4Klu2igC6Ny<)3{Y(+?bkeWOgY9mAwhv7;|mtY2-5s(}M8d_6{mm)5B%x-2p>I zz7sY6I;XYb1dHSGyBhQF3ybu8@m;$C%jP?d7yDO0Y}zbtM)m@-@6~dqP~Iu*);Wxo zxjImfC+OB!lDKtO7Tgehgf9Fu1-u=_{(h6kx#=lfmYWmFzIz5P=;)CCuB}Y>+duR& z-_M*!+2P4wE;P)uk6ZtAI(P1E3^R9q3Av5KAbS2Te)N9JHH9d%(h49_d%p_BZ@t6m z^7X>}dHLWyp1*qyzlHYJqe=N1Gc5h;hTHhL=k4~X#MLlVxZ`XNR~Mp5KJXd2^0nIV zd{Q$0y}ArH93KY**9V1Mg9SMm8cU-;d&7t3DQu`om(R?JF(MaDJ}zB?hps2^d_6bv zYQZoFzpt-oXrIJ%V!sFtUdk?7a@X!h^|kN!^LHd-3v z3B7-G>A(a0Z^=}0EIO5V?A9Z;FUFF&p^~h6>?v}m_#W8aUdU}bv5f4ooWPvoVu?Jj zH0qPxP7cgCLGLO|f_bD5MMM_REGb^sr8Nf>y%vJ|qlXY=p2onXl)X_;B9kLLsLfJE zGN~YqWQ~2v?Y{d7M;E&g$<#=Y9v4UwRgZG>hDF#~n?TYtDqHwLq6r@SQYWi_H`A;q 
zX~GMiZi4vZm1M;J8hl)r&*vpBu~PooU1~bvk-jJVt_v=UZJ)vQ8S?wus8TL-@HB|W zl|o`y0hTSyN4pV&INX1e-fg-FXAL`Xd_Qup{{+*26Q_c$%MCEU#Ou8>RME=&~$Oo=_}W}DA?^U5{d$d#84~c@>>)Q0b%I67#IPq0c97^i zjtzZ@pcgr1@_M8$)6=h|Lsy3B=;27rjH$3$cux&xtqrGry$i8-!E4SgCzg0y7n7t4 zW!vT+DXzH950$mr1dbEa=)8&$B9eBTXkRDLdTcUI71V=SzAS0lu>?)j50el6X{_2V z6%6-0La_tqh@JR+@YGkuO$LQr>t;E@?ZXR%7H(DSnd&mO^!-#eW}_aNqFc&#IlSdW zrW><3iM>Sf+YNGVo&s=FA_XBXwq&~JasfGX9>;ImL=<(Dcy?kYwB9<*DfgM8Q#hr@ zZin#QXiY9mUy_VEP)6dSLvhg4jhryMhdRGosMX^IBr0pKz&c0`EdM=&z2kJr%2nq; z=ae=)Obx&q;cNU^C2!mC$BtY)Jb-Wcyq?UIIqc-qKScKXB&NDQ3LWYy;X$=ExzVY_ zX+|v2o)l8vi22tF#iF|b{r!JQmd2>aRRp0dEEcTqnef(XNSc*M4G}51?n1N1 zNA~949aik2`&@FvOBNg&GEi|wC_dGkz|Br_MNjuYR#dqQ?>YOR$Tc~1SH6c~s}*g# z^eP}mu>|#+6-npQ2r@n_7k{p6qeF|-Y$dOVLRCx{c^NOpTAzM^C=UyscOpTPx~79O ztit@btK7KZZlSPa0=leHC&NP_D7BntqDZ!LJyW#6YjgsUw7LP3`#6mC5G5Dqj3Lo4 z#Eml|KK;Wl0R3)?$X$PdXl^m}pzTc>$4SFK+#T=JZr zu8|?m+YL~%lftGeTS()O0H>R!3pZU_PQRPJhHXQmIk7D=MCz6TOTT2zjQI{m<-Ug% z_ZDXofBQ(H^L7jin7f{2FG&SOg+E+`;SZF^8G@7iIWhNX=Rt;nq~!Dd#;03=kwv|)p4}ohCUfPVGW7bJc8DfYPgyQlVIEyDV98M z2!34qUZL8r#Afxc5WX~)WO>`i6A7yvL1@@V5Haq6xHSvl>1i=o-{JJOu|#{bA2bY};5y_=IQO+8Wb@iFC~cC4P0lf_esU^|IF;5Fa=sCKIdDExd6@^G>-ynn=?lPpr zz!2(3$_o077of}To#gJs^F-{G0g9^m!a0L$f+=OEh==lK*s(~QEiJi(C*L-}IIkz< z&^Je(|1^y*G2_^a=KoNkd=;BI$p^02uEIm_%W=x;xx$XfiDcNvn;HM|hQjYFxc;d{ z^uUlB*(gkgP%(8Dzhnj5fA0>N;v~zaS5=ctuR>N__6Scc7vMZy8Tf9*E6Zy~FoPO? z=6ySf?4G><@<}ny`*s2U{?=rTk4LjeEdx}MbA&aOk;HeEF;j?sL7()Qk~zT_*oy0_ zP*YV;`(0+D+nsJ!@$)HvAJZamd2@iC-r0^B`x{t$Wgj;jcL~p2SOsSkAEKi{4lzuB zAbcS85ITbf@m5zf+ps+Z`^?5L^9mj8>AXy?RC^HLD`%LIs{vcNyN*Vf)DxZ2M$Esn zhU|5?4u{QT*yb!Tw*2EOp09hDY)&WmUhxAMoi1maEX%R}?`|ABRg`(1$$-MHrx>16 zg_DoTk{Rn~llyO$U|ITfvSs&Yc4N?tsTrA)KTA##hw=Ra=N@y&_^9E!J^T@7|ab(me1Gcc?EcxZFfk}r2WVAZJx7nWvE0iKoC|JVOKFlMtzQw^I z(K{G*=O!%Ou$1V9+CitWJsFgZ!R!WUvO_5lX53jy%uCbAp#WYL(=!Pl+bex?-`gCfD75wURKTNB#*6`*v6E^TXH1nUYnfT*VrArn2oKBER! 
zDb~@kXv2DEHPKKRXRP*F!1j#^gy=!SR=OBK*n%#B`o|SvWl?%T=j+|IC`W3nt!)LpRpBjax-R)f(-tB zk0Enc+Oj@A7i0KXoIDI@=eEE#h_vnnjmfuQM(G5|3R*$u(HG=d?jc^ir-GiO8Yfwa zv+$ikEa1X1oWFmVdVP3@<1GBxIcH#&4niCGI}gx%#~bcr##gZBnL8tc`YRUwQ^!TI zkLc^K*U>G03VW$D9%fIROcr+~!_l43pi*3w9QpN_WO-;Z^O9#YCsLQ>A8+QuA}Z)+ zV^xgY9)SOeq@lk2HyDar3lV9RJU>hqw*ScF3Y`9dVze0gJUvYxnhxMk*C$kysN%=n zKjGnb1f3%*$^FS{II8RdN?CPd%jQ}P8C8zOr<~E$xKL0K&R?fpbRe=SDFi@*_5Tra zE-j;|$6YP3yQwC4^ra7OPk)VTd{&{YY##)dB%w@O2iSSs#@J~R?D-5ewB+B%C!bPC zhZaMr?bkpv;s|l5RRX)Cl5G9GKO6IgUq5Sje+Hzr%`hrSFW+?sy@UPsjt zFZU6k68al!^sKNca#*ncWIKNAQ%8$odm=J|;^m%SF!Hnnkxv-Gu1~h7mm1^nr9hX8 zbjh<5buxHS(}ws=wr4UYZjlSZ0lLxS1}-kl2cNxD$>ycM1#`uBGMAi0j8drv3-SAW zzETRmtWYLNYn<8MoQ2F*#T;CweL}JFI+V%eGfw|xS-DXJ=BU_``;Y$O&Wuo^`7Q%8 zeRR3+a!X-n|0?p=n(w78ZifBuQ{eox5^7uJit)qC$iXyYj4R2;<-cE|2Dh08e4dDh z4y&^65y~)q{wDII@*5|4?5MCo{XBG(_*XbN9EUIbT(RlfQxx|IW*MV6})5c^hDk`5FTdcBl}RN_err z_+a7n2l-Gm+k=%4VKuFoe@14C>P9jmn$b&zpGj2)O6NoVu-<*71@{Eyc*IYVy^ZV?R$N6mZ(om6kq)RH9|iqG zv&n><4p@9o4tJ3RQhsR#c8T2k&C-3 zr;>#p=V4N>C57N!aP`tf)_?6k_+Y2R6xFjy>S7K9=6SO$UeTJmny_BI0*K*|vka3i&?Vf{Y*en6C3j_ zxaF+xXS1;8wl$1<5s9vk?$|u)dyYC(T3BD!f)~`4$eSmwWJBv12;e#0eFN@fUXi%1 z)zm0bYqAyHJ~m*>oEZMhum@KpN0IY4`nZYaj^O7JS+QnYGt5qmBQItok-hEBsI%Lc zjQbJBisgQRugDf`w$vf52gLBuvzMGlcL1ip{E5xC6$mjF1ZG|B?eX|`~ zy@n8d_j>$RmCHtCSaAh+tHBCK0iDhBo0`NFLD4!=mi9#a_%P_y+s7 zJj2B`{C(+$RA~2GPL|(33uB|>Nr6c;n|bOej#5bH?@Yf4R9#mR54q_mH%Ev0w^wm* zN7rCh&mN+GTbi^^7{S`VE5o@fY4BAtlzTp{9qkvVl9ulmP`!OL$+;Iw^f@=2vwAkZ zRP4pp`+XR+VFytep^5R`qgerepBdG%73c3xCwg7R5L3fvo_ED?Y8o5ZmDjguPmVF` z{#ptTb&ivO12f1Ez9-)u<-s<1&msfU6LITrBd*+su+&C%IOkP|;*LKtR3Z*8Rd-;| zf~8E(vmIZ)YeM%|=V^RiJRS--56g_-fR*)CT;5WFFH`tAn|%#O`A-tHpXN4`nhaw zr7Pr#ZlNZk(oFn)GP5~8884>81u`>9gh0+yl@LtaT`v%~xhNaR*9 zn%om17nBwggTi8n2HwWcms7Ae#~q{p6KCtK|Dy-2gy=ml8ZRe&!o$*<*v z(ss=K?|VvYwD9KV86^FnBMx_)K*zUJFy)a8q&usjwWAo*eozJGC93R<<~eFP*H7TL zOP*DF-^Ps*H$loolnm(_;y%Mr95NE3dNG5F!hI0Y@s55_$ivH$L2xHH081M_3x6Cp zg`l}Sr&Yn2M)xbCnfni}^lbtz+Fdw9X_N(A)MkS1OMBpctUDe&Eer2I+ypCUe@J@H z=LPa~asS3|(CPdI+kV8r9e(X@;yKm!1D}Ma8icqqClA*rxZ+QKE}lK^G{$sDu_u{F zY50bFn5bb3HMi#r&tF-91$%8-+btosddYI4R~9nVT}@i9Ph(1En^E&?2&z9+W7?l@ zLEu|?0?M(1?8q$~rF!_!>L2{;5hrh>+y$#QIAP+}9Oygv49fV~QlDfi+_0Bo=bIZL zc%ZE(#TNGFrHI2Ax(Pg z*#2(Dmd^7En;FOl>!oZOO91Eh>)s}T6nNz5gGqEm;D@w<=4tA z40*_(uYJ!#{eV1k{AemL9{dU+K7~BneIfa*IF7yZx&%v|w!v;*PZuv-2~YO=vy?z0*} zqCREf%-jE9=A@r=s?-N=TwXENZ^Q=+QHuCuBb>^Yrz!H`f>}% z8GPUBkLiuy!P_v9NG6VB+RsXfW7ajY>dRI7WUYej_pWZiI`KMcvrh;kwcEhbpV(coWbG=Z?(GY&7t64p(Y%A^Zx?sZS_iH6oWgazgRpnkHBdWX#Aumz5_?^c>>%`c}}~?n8?M*5vSGyZs%l067^D<^dHi=8S42e$??g`}1{OScg(`4cQEF zJviH9iQ3y^$kI9;miXc&2Ahq*xwCB8cWW7T=x9HtS5HFgk`$~yF`qR&QYNQ|s?fnz z90uB**jFPXV&K<5}tO;cz0k}X))kT2N$X~v$8RPtL|2pgk9k$qHQ z{nabjz2+9~4!@R9yCuta$CM%el~?)g9Rb2xACANytHOuMSzP!lTZ|u1@ZLO2_SD`H z1jkYdbVJC=?!k% z`a$%RJB9iB?$EUL65RjEvnZm!vG#XbH zjRxP1=b`n*LtOT+TNrsK3j-F7hANwh{5?+*H}irUsEtVzI{h@p$@+QRt@@SN>UfDO znIOaGXu45u%mQ$|T7pV%d3V;mukbZOgssH&FsGvd%&(3i`d&w2qu*A%&hBGJ=Q(*)Oa2A&SpCJb_iDYZvml4Jz9=@&dVz|qPuGjgzK7vYe+oR{uBs}9wsnSu@MhX zm_wYNm%)Ln(pRxWMG~BIHi8 zJd@KgL){1dq%@uhM3nM{M^=v%x+)9E#d{xUa@kqoD}F8M(PDg0Ylu^-xCc2u_*syt zH=OcnvuW3yPddUj5$CB0rU|i}$#P3DubK$+B=m`N-&Arm`93_f34 z$R_SJAZo4rzs;u!W=km(M;}8rlRy6-YtUi#(`rDY?|>lStv4(_6U??;z62$YWqD5z zA&rN#>6(mFf;2${uaqvwpGEs2RQ(wZ-om>Wb)N!J(qJkj%H*T;A{?u1Mt41HfbezV zd=I3BR_6af)7k(I_WGpcaen-Eo<+-VUR7tr{I+V;k#c3`0gdO3}QAR2nT7E`z zEkB~bkYCfE%)ATNUgV*c+X@WWRL{LCel7^S>c{r?8bIvzPFQv{4#s)=LEYM8czMGq z*co9exV&I2o7%}|hmGfxH8Zb+YIPuum^z-h*Y)DbofpaYhtYVqP#4iY6>fgsKwdaX zq2Y)!q4K3{$Xd}#*OzvKFuGJwSSG=|3Y6Jnc|NDR+>HE*Th7f`5et$Z(l9@FJ7@Mz 
zmaKSvgPU!31wDgSaz+QcfYolmf$}tRHhcl|yFQ1_tue;u-mc`$o_i?v+>d9lOVGVtFAlsda^cteK+c1Y zx-IHT;v%jaGcb@R2?5ER^R313ZjLw(eUoI8Gx}g)^GsIdc7~m-%Y<})f2z!$!}Hg_ zY4j&isFhZ*^*wA1w~egufS(!(^bSJhB`@KpP@8ed{&c``KO8Wf2y<`xG5gucSf`qg zHg*<7b>j{){9P0M+NOfUh!pJiwdLe1Z-e5JSYmcY8g@6X@4co6`;o;emry-s6nJj+1jSr2);qm|{1Yj_S#NLSt%6US!aNxp^|udL=+}iH zW~a!+_W8j}oeLyS&W_2+HN%<>aqxZ1Y;c|}fv?OK;L-dnSp9J<^9+86D%z5yi_h<* zyd8$ZnN6@LIg`)3o3Nr!lnze4&G*f{SmXP7WLqYXn_?0ywA~Ktlw9D|Hz^ix^9dZC z)xhJtIUIM;W-{eBQ5O4PeY9p6iF@;1cDKTTi zbQ*YCm5g|2jxmPYXxIX2+`LAU??miE`!R{^V&YpA3oC@;zUuv z8=x`HB+klC&}6@qXz|>h%ZEPMtl{UTM=J90z(Qks`r>jH7cvogww?lq1-URPPZL6? z-$XBPV^NVBr1Rih@_vmndD@r?;R`I;pP6Hsp-uzL@%jxNGqPY+{Cg~W_L)nZ`j0!Y z)S7oqjv$l&h`_VaU=P<&7|jcNXrgZT4r=mV8r*pNL4#m}3#qes_}xJxg&QTF3- zv7|qe^v_(;&U-l5`87RwT7*3JKMNDf&6wmqAL7ngke_G#Ao5LGfFI?0ugFB1hT zBv82dzKoE#K0=kNZg8}=fxKDtlj>QWf zmuDjlOf0AG{8o|ek$>@|S0NtoO+lB9Pa%C)7K}0nmgSwtdmMJMHQ#iEyOkt~p`$#M zN1TDWj#Q}m)y%VXcd}kne@toOXHmUt!8|UG+i)|Sm9fJ(dX588ZU_T~ zqna4=?-*VU@?)X)Z-|7y7y}J|xLolRU5ZDNqAm#x7GZ&FrXxjptne$bu=F3djMtVoEb=>mDdpM9a};4hyfXG`W4QOOU4TK zy(II20h>*-a6JF*jB=kuo-V(|=6j7KH}+nJOF0c>-Xk9-Z?Tp>T8UKVbpRfBcnM=b zfgNwDAljd5AS%+A=oueGV?7&kcl;TyFnJAmrahmSxEZk>E5~D5nIEffn9q(?M34qJ z%9dx!LhPdsVpKZ-Tf3IS`md+4>!}D4%~5Bv_pM0FeMq5=5sP3(-gqV}zKSL6)`Ed@Gni(i!|qgm!%55kV6nb{ zSdJeDe?OSwW4;@;GS(4X21B_0_W7La<1f_d-*M>I_JMz|y}7u<-B4(1#LXOtfhA}6 zLXS0nSJArgRRFH#}e>rw}^;~jv(kter90j*4 zg>d`C2CSElBH@NYt~&iIRS#-KjVy1NJ>#ozRm)Z);o>6D(v>9TtCw(X_p@+P+Isq~ zuLX_GOSwPO#NhehLTtS78}d&Zhkw@P}FS#`=Bnku*C;S z)OfQ0d<8u0Ttf!Jbr2Sdu}ANQNOb;3-qE#<8H?zU-XtN$9oS8#O+P|*er)Et)u-_J z00pMp_Y+Q0_me8ZJ{Fw`nMx8Dms6t%)^>ts-N zokwpBn~}KU7&hi6&s>V?hShu~&3?-`vT0{JWK8m4T6^_Kbk{cGq)i8^3nEWh8G@{WEgc7c>P7sz-0$1v^bTqeKe6^>&INOM>yPCEPm!1FA4xQwwK z_?peB_Vq(yxD>2h7(jKqQedaV9mreqn3%#1a31!-SfgOlJadS9rSl3ZwLZ{aqxO=) zovTsq?{;qLc73e+Bgx2XgZjQKLu>^K9DT z@j#XJ*jx)n=LT~7&hMvBzjmWot&?DH-g~O=9FKPwJi+S30NPd`fQHN8V6Ec|dSsdi z9;jIY@%u#BD%C8^H57+Cvt()ihH`Aa-v{QsXD~tPKD0cJ1@#hnW?)0%?V}@DYnvoE zqrU^g_aQtzC`Q&s@g1zpWNv6;4cwP9K^I9su-@s#XHJ&jMu{OXiS7cE?Jx0c#s<*Z z9L7~Pra@s>>B?cmy=b7W9HW4mb3SH);2)Sockzpyc`BuHTFarL+gzj zaIQ~=*?teAmz;F)+RYQ-?h%AD2Bz}v>MyWyqZ{vq)`rgDczVq~3#VTeV}1JDxnuka zIACJ|vxg8aM4y6u_Xu7{Ra2%prx3J)K5_}v4NG3D2u-SV*{^JQ%-R=1mH9j38SO53 z)_nw#zTX4x!AW#uSO#AEcmg`^)!}O+H_(#5L+u<@nN};$xAS@=c>AZjV&BAI^qkxT zbKKuhXLC8cGPW1aCU$U(^Yf`ql>kDECEN^&}-_4la_z!i|*@?{??gD?9@bq7BP~m zxdWUw<)PcMB`~CYx8fd=ukanYk2y?hM0KV`;tC4zcf>mwIK^{%6E2~Rwhk7gE`$3; zy@HfIMeut3Pu|(bk@H_FxRC1#+*VzLBVxj-4c}9X;8_(9wmjl0b4=MakMV3U zunB*g+<^k`3fgC>2$H^2n1fy@$#E;i`n$idLxayE>%YZC#YIFcVJqux`wtdp#Nj7X z2e#={Jos$*Q}O3O80hEc6Gz=5+~`>khpzqwDFrWb^^reZ|2xU1HXpk9WLY(xI~J_&QmWh0jdEYnLtoOxgfZA{Cf%L5p_E`l4;R5?HuuP{{`tOmj&k zs*G{L`<8a_*#0W_T-^usjZSm9yY-lJWF`c@nvXRb-^2EtGuUOY1KfGu?cjG!deT}I z6yCjuBNP6XH?9cfqIdP+Ii6b*YF+~Jy>U1`V=Yvuq~S5c>7bn_3%8Po;KgobqB-db zw#g^rg2$t{U*CHgYX&1FZjV2~OaLq8V~xO zhviacsCOX{yuK>n#T9P2Vss0(efmVj7HM<2Z%XN$@h!9q<#6n&8~E?eG0>a7*rv#J zq|Ke0Z}{V*KZ)Fb5$Qgd!N^)(m(?uNt!h&jhdBr|JL(9>v^BoTK==Fn|ts3+UM{&4m)sm`-2bf50Un7 zAHXDaLpblP$7bef&2)P)qSk!h#6a;;o50DnWdzdGhz5egh{%6>Efen;ZW6B`gOD^JthAbi?!zpmo)p*wbIO+A2yv9jCdxj zaIS%T_5vU1Dv%m6oes&Xgq5ou(c-)*ZGX3f>QuBq zw3u{R&ShpLb770OF1AIa3r+{e;fBh=?E1xZz@|twn!YKRcWcru9S3oz*BoZq!$aCh z>Bj3}7Npt75h7AYLa#$X=ziE1qwjmuVT0BQXLIxf;X*C4>^h;Xts5c@e&LR-E)Y5| z037`jFfF=Jh`bSqqfH+O)4i_$O0P+v@+I_fUPAV|9F;AYyqx=fR$|Rv zdL~Q}{e;{B3at8?GT9L{n^6^GP&%~`v^MpCw-$#<*A#OWYN`e2yCT32o$%6+7#Oak zBs0Gi1x3;0aZiF0IPceIhoTf{=xI&-I9ZMkTp>BGUH_GrR3E^qKi4>~gOOO4uM6Uj z3AFdrIkeY^gTj;54~4JmVsRv&4^ID+-FstBlqCLs(|sN$$V+*Ud^J+mr-Zn>RKe^Q 
zV`N#23h~y9a&Ufi7V0}?L}7nVI@QJ(uI#vlE8eBxn5QDn@jNcMZ$`24%Z!EHjs4iz zu0*JL)&u*#n*dCl~s+$DOyUX^!L8->hCYE=K&M?FKrmqoHK;on`VOgoHofLqsIxBLu9_6o9UA4QVH|$re3`?I8({vj z7>?%T3r?IfHE)@WTc*UZrSs1V6~PzC!U0Bf=)9{qF?=G+lWrBd!3Un*`6+x#i>ISE z%z&FFi`mV@eW;aW%37M|(ZO%kab#yI{Pukm#2!6=B> zr$W!plc#0t6_bnw&H2QP;g2+``ad+TADa(F5xPxuJ0^1azfwU1=&*E^C&axTnW zIf1;&(}uP)_d(0-A2{qfLCC!zL1b?&sl7pyAcs%AbT<%dIn=G+Dmt1;) z+>}RcJ-Atof;m(-{J%*HqrxjiE>FL*BobX|pI7u4wD*c?cEeFLgS4}yvZq0pqC z0+*B%@L-A@bbne83b!tR*IkJX{!pErIXa5!x{qW{9unTFs2vCHIgFWm%HX)w7c%Xi z2*;0Tf=09@`RBKVzBcioE`ztzhEhxVc|juFd8$b3H;rLte>!07B|pJM?Kb(rDU+ge zuVh&2z|3a#0+YUn1X80<*GoLF4cIChC%XYZjvBEm7q{Zi$73+5@f%6(u}4^Hl7!j& zcVeOHKOtpK4_skx3{#eebc*)j)n?2YD)JE9KxN)oDq2+V1K6o6}O|J+u9u&&v zXgXuUe?gEF=?SYtJ#gu~$>qz&P6`qR~xQm~S z&EQWGTN<$E&y!rBvZ2g|r_l3M5nR9j2bb3rfKg*AM9kTaHeqR~T;hT8`5UC#N1c5L zQ^9xZQ!z=emi+5A9fl@JbC>T8!rIqN={@?3QqD%EwOryA&)Nwa zHoC!a?N0)C-iYM1NE`+8Qi0od3WhqC)7Xa+FZEz32P(-l^9O>-l*k{Z0% z%*phnEVfs|eeT;_O;+&c6}CpYuvOEB(P0T8c=q>TS{(2~=(OoWbIcAvd`vgqlQQ|M zUk^axYa}dE8cdxumI`4L4Dr@>GjvUsvZh=$-nPIG5Y;6KJJO|GfYqL zf2YOv%}*6x&yR)cub<%W3r4iQX&^CMugun-QlN8qh4OXvnUGujSLlUv%SVrYJO^efe;-c9;x3Xbg^%EU*RPg z@WzbXIG6$97B^w~{jt=wyo^*S&5;H4G#dnw)@-|0ax|<4IoyOeVQ615tCCGCUqM8(#eJzy|*eQ0rF@fASCF^jW(d zckIw$Mk9xUe8pDbRT~d-LNXd2J3<77bWC&ehM`50*X-tOay>`dMc1hmn8hs|Zs_Vb z%xb^HT9v$u&dx$yKgpw#Zj76a%i&9kEiC`hBv^hd!T4n>@wl|FIf3x--zy!^S9p&j zy7~$?;vpI@AkS%F590VhvzBa z%eM^o{OW{78@FJ@=s5JrJcBi7lhMX32fiJY!^EmQwEk#7&DkzP}_fokYpMP+J9DHEANU& zdLAY<=ThO|3|aXF?JdFw?hqu|>Z7<_lj&O6laB9EXk=;(x#p4BocN6N^iZRR-uR(S zRtatvm4$#QgWz$`U}V;%!j9TjnO&6teRnI}uVR$erA2bw?!DY7rP`@!5{;gV$rCVmuumj1={go@u5#_1Q zs_huJE*aPE>4TneR@7(gT^L#(g`#dW8I`!09q*h=U&id_e%7VJr?Y0vxW`VISb7Uj zPRR#cI1IDjJ;aZ0OELAnE7l}SHUG=wc-y)TX5YGq)9>}5iXYO@I6Dg`n>XOBPrmpy zCK$%WttK;PE3(!?>4LEEKs&X-Q_d5(7+Z0h234O0 zUb#1|Gus3&YQo^QT?Kw7dcvvX&Gebne?FFRjA@T>W!i?SbgIb%Jl|A}tIaHFvxciM zIR7~|Jh+b0D>mVi7ypPm(I(wbd&57wV=%J430Z%U?eW|a3jw25 z3>aAB{ui*%Ghm{2@ z&C%4|bhG3$Freq-=F!ZH30NQgoRdlH4BpX*ML9o*nWK-9Eb4*1*Ao__n=FW%UqbDa z7JO{87AK!yM2+vAf&ByiB9oV6KUcbd^-xB7-co~Fi)`-mE;XX~(p9*0rJoQG-j|uj zY=s`17P5v(9k|8oY57R~Ul`qg3*&7M3-ikLFt@~-P1Q@IOQUljEBPAHuUVjAWU1Li* zdR=(pGYi?)Y&RUTYo1!g%5s8MqHkD|6wb z)+*}8hvTY6NAPR+7s32aDfZ5j&RSn1n)B6&K3Z3T){RYA)wuwdDoDPtNzwE-d4)kW zgJ@-WxXd^yjI7OA0HKNB1=Exhbb@+;&@SZ&rq%gTm&b!yaq1gEwcU?KmlR;q6Y04; zHJ(~-o-A3dAtV~$;+jXd`c@@fzI#aDc z7hJze6Wj+%dzvlnB%%LT%(-ky?PKP`$stw(eQ^-AHmERzG1B}&`6Bxp^&9)m=4i)F zH`qBzokgEsM}PE?hrcf^>9Gi)xw^W-Qs1AL+U`R0Z;qxFZVkdw;}qEZ-U$LN4%5x2 z?J@R(C!{(Jqp=!gLQLjrwqe=>^7BxvV0XYt@;`mYJ^d=d^?RvM;-!dp2K>f5wTWQx zBpd!69FA*OB{L_56l~9`#>~PC@J~(-w)Xd@^Y1(a2Z=9!Y2{zed|3h>?NJ7~@(OHH zL;vLVo4W02mepuTzS%{Le>Dl)FMKU8QalWSFTP`X zZ&h%)bqum%ByVDu5{->DV=K4y0-v$J1cOv7+;^uZyPsu-Y3Gw+M0gH-?tY2auAUKe z8{>uf0aIlA*-&_1h}p3gSD#IQftUNR7@ZOH z$nz4st!9NAj_sh%W~1njr|)Fd;r=Z1MFqys^2O1bd62PsGO4*34$7k|vE9ELmaI4> zJip_Qa;1;)F&ppV6VsgJsQ1zP@ zJ=t>z*hPOPdr}jiJ@_e(H+})y83&<7&KDac&(mV_e4Ku)kSNDRgSD$XyE*43lqz`8 z)`(hg7_vh!tO};KoihaEx+kbubq3GH6%xhrk1)`toCN0i!dUlUte-ZB9eCnLLT!Et zI`fOcah!?7ltG!En-*P@r$>Xg&Bhw_!@`TiFgnaKkd8bWNP8a4f*Jig;F4vG!x}>A ztY5z|dz?4KUzpFP2Yke5&7-CKlM<}TSD?FIhLV^6C8E*#Dmb;Eo@55PVS!MDO_ERC z>dEo)6~zn^+{UtXrm^4~+hh z7Z1+CY0~?5e130|Bp)km-XO8f)TG_rCHGobv$#VF2KA;bD|$k1SBmiG*LSR(t;#O&qjA>dBzXMVk`5fwh69c&;_3r^g)wo` z=X{3}ZS|_hiFQh~%a)OTb6jxl>D$7QW4{H}C2i19e-SgQQ@HmUPLS4QgyU2x1Vj&K z>{z$ZesKuA+*<%secr;;sV~4q|2XMiXhGH8G{` z!sRbXg2k6Yy!~t@wmFQUeXjmR^}=h8!Wt!5DytWk&W^#!Ce^sJUpK6;3&Hp|`dDlp zLhn5oj5@cjf-rx)u;F?b+pFgZ*Q=b^tIz{nn4LYOobJhVW*)%oKZEex(n8Q1X3yL% zoW%Ks9Ffu^w7am-Cx}#OC&9Cq&m`8t zAT+%$gN!bO)D=9qE}D`RI_|EiBUN5Fo6YT=aAX4~fyR 
[Base85-encoded git binary patch data omitted — raw binary file contents, not human-readable.]