From eb0881d3d419bae1fc806dd22cdc6ed208abda99 Mon Sep 17 00:00:00 2001 From: ZhidanLiu Date: Thu, 10 Mar 2022 18:31:58 +0800 Subject: [PATCH] add demo of ocr evaluation --- .../ocr_evaluate/__init__.py | 0 .../ocr_evaluate/analyse.py | 126 ++++ .../ocr_evaluate/cnn_ctc/README.md | 591 ++++++++++++++++++ .../ocr_evaluate/cnn_ctc/README_CN.md | 523 ++++++++++++++++ .../ocr_evaluate/cnn_ctc/eval.py | 111 ++++ .../ocr_evaluate/cnn_ctc/export.py | 51 ++ .../cnn_ctc/mindspore_hub_conf.py | 30 + .../ocr_evaluate/cnn_ctc/postprocess.py | 54 ++ .../ocr_evaluate/cnn_ctc/preprocess.py | 96 +++ .../ocr_evaluate/cnn_ctc/requirements.txt | 7 + .../cnn_ctc/scripts/run_eval_ascend.sh | 50 ++ .../cnn_ctc/scripts/run_eval_gpu.sh | 50 ++ .../scripts/run_standalone_train_ascend.sh | 49 ++ .../scripts/run_standalone_train_gpu.sh | 42 ++ .../ocr_evaluate/cnn_ctc/src/__init__.py | 15 + .../ocr_evaluate/cnn_ctc/src/callback.py | 73 +++ .../ocr_evaluate/cnn_ctc/src/cnn_ctc.py | 389 ++++++++++++ .../ocr_evaluate/cnn_ctc/src/dataset.py | 343 ++++++++++ .../ocr_evaluate/cnn_ctc/src/lr_schedule.py | 41 ++ .../cnn_ctc/src/model_utils/__init__.py | 0 .../cnn_ctc/src/model_utils/config.py | 131 ++++ .../cnn_ctc/src/model_utils/device_adapter.py | 26 + .../cnn_ctc/src/model_utils/local_adapter.py | 36 ++ .../cnn_ctc/src/model_utils/moxing_adapter.py | 124 ++++ .../cnn_ctc/src/preprocess_dataset.py | 172 +++++ .../ocr_evaluate/cnn_ctc/src/util.py | 102 +++ .../ocr_evaluate/cnn_ctc/train.py | 148 +++++ .../ocr_evaluate/default_config.yaml | 76 +++ .../ocr_evaluate/eval_and_save.py | 100 +++ .../ocr_evaluate/generate_adv_samples.py | 139 ++++ .../ocr_evaluate/image/catalog.png | Bin 0 -> 14419 bytes .../ocr_evaluate/image/name_format.png | Bin 0 -> 8025 bytes .../ocr_evaluate/image/result_demo.png | Bin 0 -> 45458 bytes .../对OCR模型CNN-CTC的鲁棒性评测.md | 508 +++++++++++++++ 34 files changed, 4203 insertions(+) create mode 100644 examples/natural_robustness/ocr_evaluate/__init__.py create mode 100644 examples/natural_robustness/ocr_evaluate/analyse.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py create mode 100644 
examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/__init__.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py create mode 100644 examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py create mode 100644 examples/natural_robustness/ocr_evaluate/default_config.yaml create mode 100644 examples/natural_robustness/ocr_evaluate/eval_and_save.py create mode 100644 examples/natural_robustness/ocr_evaluate/generate_adv_samples.py create mode 100644 examples/natural_robustness/ocr_evaluate/image/catalog.png create mode 100644 examples/natural_robustness/ocr_evaluate/image/name_format.png create mode 100644 examples/natural_robustness/ocr_evaluate/image/result_demo.png create mode 100644 examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md diff --git a/examples/natural_robustness/ocr_evaluate/__init__.py b/examples/natural_robustness/ocr_evaluate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/natural_robustness/ocr_evaluate/analyse.py b/examples/natural_robustness/ocr_evaluate/analyse.py new file mode 100644 index 0000000..9d9464d --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/analyse.py @@ -0,0 +1,126 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Analyse result of ocr evaluation.""" + +import os +import sys +import json +from collections import defaultdict +from io import BytesIO +import lmdb +from PIL import Image + +from cnn_ctc.src.model_utils.config import config + + +def analyse_adv_iii5t_3000(lmdb_path): + """Analyse result of ocr evaluation.""" + env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + + if not env: + print('cannot create lmdb from %s' % (lmdb_path)) + sys.exit(0) + + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + print(n_samples) + n_samples = n_samples // config.TEST_BATCH_SIZE * config.TEST_BATCH_SIZE + result = defaultdict(dict) + wrong_count = 0 + adv_wrong_count = 0 + ori_correct_adv_wrong_count = 0 + ori_wrong_adv_wrong_count = 0 + if not os.path.exists(os.path.join(lmdb_path, 'adv_wrong_pred')): + os.mkdir(os.path.join(lmdb_path, 'adv_wrong_pred')) + if not os.path.exists(os.path.join(lmdb_path, 'ori_correct_adv_wrong_pred')): + os.mkdir(os.path.join(lmdb_path, 'ori_correct_adv_wrong_pred')) + if not os.path.exists(os.path.join(lmdb_path, 'ori_wrong_adv_wrong_pred')): + os.mkdir(os.path.join(lmdb_path, 'ori_wrong_adv_wrong_pred')) + + for index in range(n_samples): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8').lower() + pred_key = 'pred-%09d'.encode() % index + pred = txn.get(pred_key).decode('utf-8') + if pred != label: + wrong_count += 1 + + adv_pred_key = 'adv_pred-%09d'.encode() % index + adv_pred = txn.get(adv_pred_key).decode('utf-8') + + adv_info_key = 'adv_info-%09d'.encode() % index + adv_info = json.loads(txn.get(adv_info_key).decode('utf-8')) + for info in adv_info: + if not result[info[0]]: + result[info[0]] = defaultdict(int) + result[info[0]]['count'] += 1 + + if adv_pred != label: + adv_wrong_count += 1 + for info in adv_info: + result[info[0]]['wrong_count'] += 1 + + # save wrong predicted image + adv_image = 'adv_image-%09d'.encode() % index + imgbuf = txn.get(adv_image) + image = Image.open(BytesIO(imgbuf)) + + result_path = os.path.join(lmdb_path, 'adv_wrong_pred', adv_info[0][0]) + if not os.path.exists(result_path): + os.mkdir(result_path) + + image.save(os.path.join(result_path, label + '-' + adv_pred + '.png')) + + # origin image is correctly predicted and adv is wrong. + if pred == label: + ori_correct_adv_wrong_count += 1 + result[info[0]]['ori_correct_adv_wrong_count'] += 1 + + result_path = os.path.join(lmdb_path, 'ori_correct_adv_wrong_pred', adv_info[0][0]) + if not os.path.exists(result_path): + os.mkdir(result_path) + image.save(os.path.join(result_path, label + '-' + adv_pred + '.png')) + # wrong predicted in both origin and adv image. 
+                else:
+                    ori_wrong_adv_wrong_count += 1
+                    result[info[0]]['ori_wrong_adv_wrong_count'] += 1
+
+                    result_path = os.path.join(lmdb_path, 'ori_wrong_adv_wrong_pred', adv_info[0][0])
+                    if not os.path.exists(result_path):
+                        os.mkdir(result_path)
+                    image.save(os.path.join(result_path, label + '-' + adv_pred + '.png'))
+    print('Number of samples in analyse dataset: ', n_samples)
+    print('Accuracy of original dataset: ', 1 - wrong_count / n_samples)
+    print('Accuracy of adversarial dataset: ', 1 - adv_wrong_count / n_samples)
+    print('Number of samples correctly predicted in original dataset but wrong in adversarial dataset: ',
+          ori_correct_adv_wrong_count)
+    print('Number of samples wrongly predicted in both original and adversarial dataset: ', ori_wrong_adv_wrong_count)
+    print('------------------------------------------------------------------------------')
+    for key in result.keys():
+        print('Method ', key)
+        print('Number of perturbed samples: {} '.format(result[key]['count']))
+        print('Number of wrong predictions: {}'.format(result[key]['wrong_count']))
+        print('Number correctly predicted in original dataset but wrong in adversarial: {}'.format(
+            result[key]['ori_correct_adv_wrong_count']))
+        print('Number wrongly predicted in both original and adversarial dataset: {}'.format(
+            result[key]['ori_wrong_adv_wrong_count']))
+        print('------------------------------------------------------------------------------')
+    return result
+
+
+if __name__ == '__main__':
+    lmdb_data_path = config.ADV_TEST_DATASET_PATH
+    analyse_adv_iii5t_3000(lmdb_path=lmdb_data_path)
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md
new file mode 100644
index 0000000..7606795
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README.md
@@ -0,0 +1,591 @@
+# Contents
+
+- [CNNCTC Description](#CNNCTC-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Features](#features)
+    - [Mixed Precision](#mixed-precision)
+- [Environment Requirements](#environment-requirements)
+- [Quick Start](#quick-start)
+- [Script Description](#script-description)
+    - [Script and Sample Code](#script-and-sample-code)
+    - [Script Parameters](#script-parameters)
+    - [Training Process](#training-process)
+        - [Training](#training)
+        - [Distributed Training](#distributed-training)
+    - [Evaluation Process](#evaluation-process)
+        - [Evaluation](#evaluation)
+    - [Inference Process](#inference-process)
+        - [Export MindIR](#export-mindir)
+        - [Infer on Ascend310](#infer-on-ascend310)
+        - [Result](#result)
+- [Model Description](#model-description)
+    - [Performance](#performance)
+        - [Training Performance](#training-performance)
+        - [Evaluation Performance](#evaluation-performance)
+        - [Inference Performance](#inference-performance)
+    - [How to use](#how-to-use)
+        - [Inference](#inference)
+        - [Continue Training on the Pretrained Model](#continue-training-on-the-pretrained-model)
+        - [Transfer Learning](#transfer-learning)
+- [Description of Random Situation](#description-of-random-situation)
+- [ModelZoo Homepage](#modelzoo-homepage)
+
+# [CNNCTC Description](#contents)
+
+This paper makes three major contributions to scene text recognition (STR).
+First, we examine the inconsistencies between training and evaluation datasets, and the performance gap that results from them.
+Second, we introduce a unified four-stage STR framework that most existing STR models fit into.
+Using this framework allows for the extensive evaluation of previously proposed STR modules and the discovery of previously
+unexplored module combinations. Third, we analyze the module-wise contributions to performance, in terms of accuracy, speed,
+and memory demand, under one consistent set of training and evaluation datasets. These analyses clear away the obstacles that
+have hindered current comparisons and help clarify the performance gains of the existing modules.
+[Paper](https://arxiv.org/abs/1904.01906): J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S. J. Oh, and H. Lee, “What is wrong with scene text recognition model comparisons? dataset and model analysis,” ArXiv, vol. abs/1904.01906, 2019.
+
+# [Model Architecture](#contents)
+
+This is an example of training a CNN+CTC model for text recognition on the MJSynth and SynthText datasets with MindSpore.
+
+# [Dataset](#contents)
+
+Note that you can run the scripts with the datasets mentioned in the original paper or with datasets widely used in this domain/network architecture. The following sections introduce how to run the scripts using the datasets below.
+
+The [MJSynth](https://www.robots.ox.ac.uk/~vgg/data/text/) and [SynthText](https://github.com/ankush-me/SynthText) datasets are used for model training. The [IIIT 5K-word dataset](https://cvit.iiit.ac.in/research/projects/cvit-projects/the-iiit-5k-word-dataset) is used for evaluation.
+
+- step 1:
+
+All the datasets have been preprocessed and stored in .lmdb format and can be downloaded [**HERE**](https://drive.google.com/drive/folders/192UfE9agQUMNq6AgU3_E05_FcPZK4hyt).
+
+- step 2:
+
+Uncompress the downloaded file, and rename the MJSynth dataset to MJ, the SynthText dataset to ST, and the IIIT dataset to IIIT.
+
+- step 3:
+
+Move the three datasets mentioned above into the `cnnctc_data` folder; the structure should be as below:
+
+```text
+|--- CNNCTC/
+    |--- cnnctc_data/
+        |--- ST/
+            data.mdb
+            lock.mdb
+        |--- MJ/
+            data.mdb
+            lock.mdb
+        |--- IIIT/
+            data.mdb
+            lock.mdb
+
+    ......
+```
+
+- step 4:
+
+Preprocess the dataset by running:
+
+```bash
+python src/preprocess_dataset.py
+```
+
+This takes around 75 minutes.
+
+# [Features](#contents)
+
+## Mixed Precision
+
+The [mixed precision](https://www.mindspore.cn/docs/programming_guide/en/master/enable_mixed_precision.html) training method accelerates deep neural network training by using both single-precision and half-precision data formats, while maintaining the accuracy achieved by pure single-precision training. It speeds up computation, reduces memory usage, and enables a larger model or batch size to be trained on specific hardware.
+For FP16 operators, if the input data type is FP32, the MindSpore backend automatically handles it with reduced precision. Users can check the reduced-precision operators by enabling the INFO log and searching for 'reduce precision'.
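+
+In this repo, mixed precision enters the training flow through MindSpore's `Model` wrapper, as in the `amp_level="O2"` usage shown in the continue-training example later in this README. The following is a minimal, self-contained sketch only; the toy network stands in for the real CNNCTC model:
+
+```python
+import mindspore.nn as nn
+from mindspore import Model
+
+# Illustrative only: a toy network stands in for CNNCTC.
+net = nn.Dense(16, 4)
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
+
+# amp_level="O2" runs most operators in float16 while keeping numerically
+# sensitive parts in float32; keep_batchnorm_fp32 controls BatchNorm precision.
+model = Model(net, loss_fn=loss, optimizer=opt,
+              amp_level="O2", keep_batchnorm_fp32=False)
+```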
+
+# [Environment Requirements](#contents)
+
+- Hardware (Ascend/GPU)
+
+    - Prepare the hardware environment with Ascend or GPU processors.
+- Framework
+
+    - [MindSpore](https://www.mindspore.cn/install/en)
+- For more information, please check the resources below:
+    - [MindSpore tutorials](https://www.mindspore.cn/tutorials/en/master/index.html)
+
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/en/master/index.html)
+
+# [Quick Start](#contents)
+
+- Install dependencies:
+
+```bash
+pip install lmdb
+pip install Pillow
+pip install tqdm
+pip install six
+```
+
+```default_config.yaml
+
+TRAIN_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/ST-MJ/
+TRAIN_DATASET_INDEX_PATH: /home/DataSet/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl
+TEST_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/IIIT5K_3000
+
+Modify these parameters according to the actual paths.
+```
+
+- Standalone Ascend Training:
+
+```bash
+bash scripts/run_standalone_train_ascend.sh $DEVICE_ID $PRETRAINED_CKPT(optional)
+# example: bash scripts/run_standalone_train_ascend.sh 0
+```
+
+- Standalone GPU Training:
+
+```bash
+bash scripts/run_standalone_train_gpu.sh $PRETRAINED_CKPT(optional)
+```
+
+- Distributed Ascend Training:
+
+```bash
+bash scripts/run_distribute_train_ascend.sh $RANK_TABLE_FILE $PRETRAINED_CKPT(optional)
+# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json
+```
+
+- Distributed GPU Training:
+
+```bash
+bash scripts/run_distribute_train_gpu.sh $PRETRAINED_CKPT(optional)
+```
+
+- Ascend Evaluation:
+
+```bash
+bash scripts/run_eval_ascend.sh $DEVICE_ID $TRAINED_CKPT
+# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt
+```
+
+- GPU Evaluation:
+
+```bash
+bash scripts/run_eval_gpu.sh $TRAINED_CKPT
+```
+
+# [Script Description](#contents)
+
+## [Script and Sample Code](#contents)
+
+The entire code structure is as follows:
+
+```text
+|--- CNNCTC/
+    |---README.md    // descriptions about cnnctc
+    |---README_cn.md    // descriptions about cnnctc
+    |---default_config.yaml    // config file
+    |---train.py    // train scripts
+    |---eval.py    // eval scripts
+    |---export.py    // export scripts
+    |---preprocess.py    // preprocess scripts
+    |---postprocess.py    // postprocess scripts
+    |---ascend310_infer    // application for 310 inference
+    |---scripts
+        |---run_infer_310.sh    // shell script for infer on ascend310
+        |---run_standalone_train_ascend.sh    // shell script for standalone on ascend
+        |---run_standalone_train_gpu.sh    // shell script for standalone on gpu
+        |---run_distribute_train_ascend.sh    // shell script for distributed on ascend
+        |---run_distribute_train_gpu.sh    // shell script for distributed on gpu
+        |---run_eval_ascend.sh    // shell script for eval on ascend
+    |---src
+        |---__init__.py    // init file
+        |---cnn_ctc.py    // cnn_ctc network
+        |---callback.py    // loss callback file
+        |---dataset.py    // process dataset
+        |---util.py    // routine operation
+        |---preprocess_dataset.py    // preprocess dataset
+        |--- model_utils
+            |---config.py    // parameter config
+            |---moxing_adapter.py    // modelarts device configuration
+            |---device_adapter.py    // device config
+            |---local_adapter.py    // local device config
+```
+
+## [Script Parameters](#contents)
+
+Parameters for both training and evaluation can be set in `default_config.yaml`; a sketch of how the scripts consume these values follows the list below.
+
+Arguments:
+
+- `--CHARACTER`: Character labels.
+- `--NUM_CLASS`: The number of classes, including all character labels and the extra label for CTCLoss.
+- `--HIDDEN_SIZE`: Model hidden size.
+- `--FINAL_FEATURE_WIDTH`: The number of features.
+- `--IMG_H`: The height of input images.
+- `--IMG_W`: The width of input images.
+- `--TRAIN_DATASET_PATH`: The path to the training dataset.
+- `--TRAIN_DATASET_INDEX_PATH`: The path to the training dataset index file, which determines the data order.
+- `--TRAIN_BATCH_SIZE`: Training batch size. The batch size and index file must together ensure that the input data has a fixed shape.
+- `--TRAIN_DATASET_SIZE`: Training dataset size.
+- `--TEST_DATASET_PATH`: The path to the test dataset.
+- `--TEST_BATCH_SIZE`: Test batch size.
+- `--TRAIN_EPOCHS`: Total training epochs.
+- `--CKPT_PATH`: The path to a model checkpoint file; can be used to resume training and for evaluation.
+- `--SAVE_PATH`: The path for saving model checkpoint files.
+- `--LR`: Learning rate for standalone training.
+- `--LR_PARA`: Learning rate for distributed training.
+- `--MOMENTUM`: Momentum.
+- `--LOSS_SCALE`: Loss scale to prevent gradient underflow.
+- `--SAVE_CKPT_PER_N_STEP`: Save a model checkpoint file every N steps.
+- `--KEEP_CKPT_MAX_NUM`: The maximum number of saved model checkpoint files.
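+
+For example, `eval.py` and `export.py` read these values through the `config` object that `src/model_utils/config.py` builds from `default_config.yaml` (plus any command-line overrides). A minimal sketch:
+
+```python
+# Sketch: how the scripts in this repo consume default_config.yaml.
+# The `config` object is created by src/model_utils/config.py.
+from src.model_utils.config import config
+
+print(config.CHARACTER)        # character label set
+print(config.NUM_CLASS)        # character labels plus the extra CTCLoss label
+print(config.TEST_BATCH_SIZE)  # batch size used by eval.py
+```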
+
+## [Training Process](#contents)
+
+### Training
+
+- Standalone Ascend Training:
+
+```bash
+bash scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRETRAINED_CKPT(optional)]
+# example: bash scripts/run_standalone_train_ascend.sh 0
+```
+
+Results and checkpoints are written to the `./train` folder. The log can be found in `./train/log`, and loss values are recorded in `./train/loss.log`.
+
+`$PRETRAINED_CKPT` is the path to a model checkpoint and it is **optional**. If none is given, the model will be trained from scratch.
+
+- Distributed Ascend Training:
+
+```bash
+bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_CKPT(optional)]
+# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json
+```
+
+  For distributed training, an hccl configuration file in JSON format needs to be created in advance.
+
+  Please follow the instructions in [hccl_tools](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools).
+
+Results and checkpoints are written to the `./train_parallel_{i}` folder for each device `i`.
+ The log can be found in `./train_parallel_{i}/log_{i}.log`, and loss values are recorded in `./train_parallel_{i}/loss.log`.
+
+`$RANK_TABLE_FILE` is needed when you are running a distributed task on Ascend.
+`$PATH_TO_CHECKPOINT` is the path to a model checkpoint and it is **optional**. If none is given, the model will be trained from scratch.
+
+### Training Result
+
+Training results are stored in the example path, in folders whose names begin with "train" or "train_parallel". You can find checkpoint files together with results like the following in loss.log.
+
+```text
+# distribute training result(8p)
+epoch: 1 step: 1 , loss is 76.25, average time per step is 0.235177839748392712
+epoch: 1 step: 2 , loss is 73.46875, average time per step is 0.25798572540283203
+epoch: 1 step: 3 , loss is 69.46875, average time per step is 0.229678678512573
+epoch: 1 step: 4 , loss is 64.3125, average time per step is 0.23512671788533527
+epoch: 1 step: 5 , loss is 58.375, average time per step is 0.23149147033691406
+epoch: 1 step: 6 , loss is 52.7265625, average time per step is 0.2292975425720215
+...
+epoch: 1 step: 8689 , loss is 9.706798802612482, average time per step is 0.2184656601312549
+epoch: 1 step: 8690 , loss is 9.70612545289855, average time per step is 0.2184725407765116
+epoch: 1 step: 8691 , loss is 9.70695776049204, average time per step is 0.21847309686135555
+epoch: 1 step: 8692 , loss is 9.707279624277456, average time per step is 0.21847339290613375
+epoch: 1 step: 8693 , loss is 9.70763437950938, average time per step is 0.2184720295013031
+epoch: 1 step: 8694 , loss is 9.707695425072046, average time per step is 0.21847410284595573
+epoch: 1 step: 8695 , loss is 9.708408273381295, average time per step is 0.21847338271072345
+epoch: 1 step: 8696 , loss is 9.708703753591953, average time per step is 0.2184726025560777
+epoch: 1 step: 8697 , loss is 9.709536406025824, average time per step is 0.21847212061114694
+epoch: 1 step: 8698 , loss is 9.708542263610315, average time per step is 0.2184715309307257
+```
+
+- Running on ModelArts
+- If you want to train the model on ModelArts, refer to the ModelArts [official guidance document](https://support.huaweicloud.com/modelarts/).
+
+```python
+# Example of using distributed training on ModelArts:
+# Dataset storage layout
+
+# ├── CNNCTC_Data                       # dataset dir
+#    ├──train                           # train dir
+#      ├── ST_MJ                        # train dataset dir
+#        ├── data.mdb                   # data file
+#        ├── lock.mdb
+#      ├── st_mj_fixed_length_index_list.pkl
+#    ├── eval                           # eval dir
+#      ├── IIIT5K_3000                  # eval dataset dir
+#      ├── checkpoint                   # checkpoint dir
+
+# (1) Choose either a (modify the yaml file parameters) or b (create a ModelArts training job and modify parameters there).
+#       a. Set "enable_modelarts=True"
+#          Set "run_distribute=True"
+#          Set "TRAIN_DATASET_PATH=/cache/data/ST_MJ/"
+#          Set "TRAIN_DATASET_INDEX_PATH=/cache/data/st_mj_fixed_length_index_list.pkl"
+#          Set "SAVE_PATH=/cache/train/checkpoint"
+#
+#       b. Add "enable_modelarts=True" as a parameter on the ModelArts UI.
+#          Set the parameters required by method a on the ModelArts UI.
+#          Note: path parameters do not need quotation marks.
+
+# (2) Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/"
+# (3) Set the code path "/path/cnnctc" on the ModelArts UI.
+# (4) Set the model's startup file "train.py" on the ModelArts UI.
+# (5) Set the model's data path ".../CNNCTC_Data/train" (select the CNNCTC_Data/train folder path) on the ModelArts UI,
+#     as well as the model's "Output file path" and "Job log path".
+# (6) Start training the model.
+
+# Example of using model inference on ModelArts
+# (1) Place the trained model in the corresponding location of the bucket.
+# (2) Choose either a or b.
+#       a. Set "enable_modelarts=True"
+#          Set "TEST_DATASET_PATH=/cache/data/IIIT5K_3000/"
+#          Set "CHECKPOINT_PATH=/cache/data/checkpoint/checkpoint file name"
+
Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted + +# (3) Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (4) Set the code path on the modelarts interface "/path/cnnctc"。 +# (5) Set the model's startup file on the modelarts interface "train.py" 。 +# (6) Set the data path of the model on the modelarts interface ".../CNNCTC_Data/train"(choices CNNCTC_Data/train Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +# (7) Start model inference。 +``` + +- Standalone GPU Training: + +```bash +bash scripts/run_standalone_train_gpu.sh [PRETRAINED_CKPT(options)] +``` + +Results and checkpoints are written to `./train` folder. Log can be found in `./train/log` and loss values are recorded in `./train/loss.log`. + +`$PRETRAINED_CKPT` is the path to model checkpoint and it is **optional**. If none is given the model will be trained from scratch. + +- Distributed GPU Training: + +```bash +bash scripts/run_distribute_train_gpu.sh [PRETRAINED_CKPT(options)] +``` + +Results and checkpoints are written to `./train_parallel` folder with model checkpoints in ckpt_{i} directories. +Log can be found in `./train_parallel/log` and loss values are recorded in `./train_parallel/loss.log`. + +## [Evaluation Process](#contents) + +### Evaluation + +- Ascend Evaluation: + +```bash +bash scripts/run_eval_ascend.sh [DEVICE_ID] [TRAINED_CKPT] +# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt +``` + +The model will be evaluated on the IIIT dataset, sample results and overall accuracy will be printed. + +- GPU Evaluation: + +```bash +bash scripts/run_eval_gpu.sh [TRAINED_CKPT] +``` + +## [Inference process](#contents) + +### Export MindIR + +```shell +python export.py --ckpt_file [CKPT_PATH] --file_format [EXPORT_FORMAT] --TEST_BATCH_SIZE [BATCH_SIZE] +``` + +The ckpt_file parameter is required, +`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]. +`BATCH_SIZE` current batch_size can only be set to 1. + +- Export MindIR on Modelarts + +```Modelarts +Export MindIR example on ModelArts +Data storage method is the same as training +# (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 +# a. set "enable_modelarts=True" +# set "file_name=cnnctc" +# set "file_format=MINDIR" +# set "ckpt_file=/cache/data/checkpoint file name" + +# b. Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted +# (2)Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (3) Set the code path on the modelarts interface "/path/cnnctc"。 +# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the data path of the model on the modelarts interface ".../CNNCTC_Data/eval/checkpoint"(choices CNNCTC_Data/eval/checkpoint Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +``` + +### Infer on Ascend310 + +Before performing inference, the mindir file must be exported by `export.py` script. We only provide an example of inference using MINDIR model. 
+
+```shell
+# Ascend310 inference
+bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID]
+```
+
+- `DVPP` is mandatory and must be chosen from ["DVPP", "CPU"] (case-insensitive). CNNCTC only supports CPU mode.
+- `DEVICE_ID` is optional; the default value is 0.
+
+### Result
+
+- Ascend Result
+
+The inference result is saved in the current path; you can find a result like this in the acc.log file.
+
+```bash
+'Accuracy': 0.8642
+```
+
+- GPU result
+
+The inference result is saved in ./eval/log; you can find a result like this.
+
+```bash
+accuracy: 0.8533
+```
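+
+Both numbers come from comparing greedy-decoded CTC predictions with the ground-truth labels (see `eval.py` and `postprocess.py`, which take an argmax over the class dimension and then call `CTCLabelConverter.decode`). The following self-contained numpy sketch illustrates such greedy decoding; treating the blank as the extra CTCLoss label at the last class index is an assumption made here purely for illustration.
+
+```python
+import numpy as np
+
+# Toy setup: 4 character classes plus one CTC blank (assumed to be the last index).
+CHARACTER = "abcd"
+BLANK = len(CHARACTER)
+
+def ctc_greedy_decode(logits):
+    """Greedy CTC decode for one image; logits has shape (time_steps, num_class)."""
+    best = np.argmax(logits, axis=1)
+    chars = []
+    for t, idx in enumerate(best):
+        # CTC rule: drop blanks and collapse consecutive repeated symbols.
+        if idx != BLANK and not (t > 0 and best[t - 1] == idx):
+            chars.append(CHARACTER[idx])
+    return ''.join(chars)
+
+logits = np.random.rand(10, len(CHARACTER) + 1)  # stand-in for network output
+print(ctc_greedy_decode(logits))
+```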
+
+# [Model Description](#contents)
+
+## [Performance](#contents)
+
+### Training Performance
+
+| Parameters                 | CNNCTC                                                       |
+| -------------------------- | ----------------------------------------------------------- |
+| Model Version              | V1                                                           |
+| Resource                   | Ascend 910; CPU 2.60GHz, 192 cores; Memory 755G; OS Euler2.8 |
+| Uploaded Date              | 09/28/2020 (month/day/year)                                  |
+| MindSpore Version          | 1.0.0                                                        |
+| Dataset                    | MJSynth, SynthText                                           |
+| Training Parameters        | epoch=3, batch_size=192                                      |
+| Optimizer                  | RMSProp                                                      |
+| Loss Function              | CTCLoss                                                      |
+| Speed                      | 1pc: 250 ms/step; 8pcs: 260 ms/step                          |
+| Total time                 | 1pc: 15 hours; 8pcs: 1.92 hours                              |
+| Parameters (M)             | 177                                                          |
+| Scripts                    |                                                              |
+
+| Parameters                 | CNNCTC                                                       |
+| -------------------------- | ----------------------------------------------------------- |
+| Model Version              | V1                                                           |
+| Resource                   | GPU (Tesla V100-PCIE); CPU 2.60 GHz, 26 cores; Memory 790G; OS linux-gnu |
+| Uploaded Date              | 07/06/2021 (month/day/year)                                  |
+| MindSpore Version          | 1.0.0                                                        |
+| Dataset                    | MJSynth, SynthText                                           |
+| Training Parameters        | epoch=3, batch_size=192                                      |
+| Optimizer                  | RMSProp                                                      |
+| Loss Function              | CTCLoss                                                      |
+| Speed                      | 1pc: 1180 ms/step; 8pcs: 1180 ms/step                        |
+| Total time                 | 1pc: 62.9 hours; 8pcs: 8.67 hours                            |
+| Parameters (M)             | 177                                                          |
+| Scripts                    |                                                              |
+
+### Evaluation Performance
+
+| Parameters          | CNNCTC                      |
+| ------------------- | --------------------------- |
+| Model Version       | V1                          |
+| Resource            | Ascend 910; OS Euler2.8     |
+| Uploaded Date       | 09/28/2020 (month/day/year) |
+| MindSpore Version   | 1.0.0                       |
+| Dataset             | IIIT5K                      |
+| batch_size          | 192                         |
+| outputs             | Accuracy                    |
+| Accuracy            | 85%                         |
+| Model for inference | 675M (.ckpt file)           |
+
+### Inference Performance
+
+| Parameters          | Ascend                      |
+| ------------------- | --------------------------- |
+| Model Version       | CNNCTC                      |
+| Resource            | Ascend 310; CentOS 3.10     |
+| Uploaded Date       | 19/05/2021 (day/month/year) |
+| MindSpore Version   | 1.2.0                       |
+| Dataset             | IIIT5K                      |
+| batch_size          | 1                           |
+| outputs             | Accuracy                    |
+| Accuracy            | Accuracy=0.8642             |
+| Model for inference | 675M (.ckpt file)           |
+
+## [How to use](#contents)
+
+### Inference
+
+If you need to use the trained model to perform inference on multiple hardware platforms, such as GPU, Ascend 910 or Ascend 310, you can refer to this [Link](https://www.mindspore.cn/docs/programming_guide/en/master/multi_platform_inference.html). The following is a simple example:
+
+- Running on Ascend
+
+  ```python
+  # Set context
+  context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
+  context.set_context(device_id=cfg.device_id)
+
+  # Load unseen dataset for inference
+  dataset = dataset.create_dataset(cfg.data_path, 1, False)
+
+  # Define model
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
+                 cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
+
+  # Load the pretrained model
+  param_dict = load_checkpoint(cfg.checkpoint_path)
+  load_param_into_net(net, param_dict)
+  net.set_train(False)
+
+  # Make predictions on the unseen dataset
+  acc = model.eval(dataset)
+  print("accuracy: ", acc)
+  ```
+
+### Continue Training on the Pretrained Model
+
+- Running on Ascend
+
+  ```python
+  # Load dataset
+  dataset = create_dataset(cfg.data_path, 1)
+  batch_num = dataset.get_dataset_size()
+
+  # Define model
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  # Continue training if pre_trained is set to True
+  if cfg.pre_trained:
+      param_dict = load_checkpoint(cfg.checkpoint_path)
+      load_param_into_net(net, param_dict)
+  lr = lr_steps(0, lr_max=cfg.lr_init, total_epochs=cfg.epoch_size,
+                steps_per_epoch=batch_num)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
+                 Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
+                amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
+
+  # Set callbacks
+  config_ck = CheckpointConfig(save_checkpoint_steps=batch_num * 5,
+                               keep_checkpoint_max=cfg.keep_checkpoint_max)
+  time_cb = TimeMonitor(data_size=batch_num)
+  ckpoint_cb = ModelCheckpoint(prefix="train_cnnctc", directory="./",
+                               config=config_ck)
+  loss_cb = LossMonitor()
+
+  # Start training
+  model.train(cfg.epoch_size, dataset, callbacks=[time_cb, ckpoint_cb, loss_cb])
+  print("train success")
+  ```
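+
+As a quick host-side sanity check, the MindIR file produced by `export.py` can also be loaded and executed directly with MindSpore. This is an illustrative sketch rather than one of this repo's scripts; it assumes a MindSpore version providing `mindspore.load` and `nn.GraphCell`, a hypothetical file name `cnnctc.mindir`, and the input shape used at export time (here TEST_BATCH_SIZE=1 with an assumed 32x100 image size):
+
+```python
+import numpy as np
+import mindspore as ms
+from mindspore import Tensor, nn
+
+# Assumption: cnnctc.mindir was exported by export.py with TEST_BATCH_SIZE=1.
+graph = ms.load("cnnctc.mindir")
+net = nn.GraphCell(graph)
+
+# Dummy input matching the export-time shape (batch, channels, IMG_H, IMG_W).
+dummy = Tensor(np.zeros([1, 3, 32, 100]), ms.float32)
+logits = net(dummy)
+print(logits.shape)  # expected: (1, FINAL_FEATURE_WIDTH, NUM_CLASS)
+```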
+
+# [ModelZoo Homepage](#contents)
+
+ Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md
new file mode 100644
index 0000000..31b0e62
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/README_CN.md
@@ -0,0 +1,523 @@
+# Contents
+
+- [Contents](#contents)
+- [CNN+CTC Description](#cnnctc-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Features](#features)
+    - [Mixed Precision](#mixed-precision)
+- [Environment Requirements](#environment-requirements)
+- [Quick Start](#quick-start)
+- [Script Description](#script-description)
+    - [Script and Sample Code](#script-and-sample-code)
+    - [Script Parameters](#script-parameters)
+    - [Training Process](#training-process)
+        - [Training](#training)
+        - [Training Result](#training-result)
+    - [Evaluation Process](#evaluation-process)
+        - [Evaluation](#evaluation)
+    - [Inference Process](#inference-process)
+        - [Export MindIR](#export-mindir)
+        - [Inference on Ascend310](#inference-on-ascend310)
+        - [Result](#result)
+- [Model Description](#model-description)
+    - [Performance](#performance)
+        - [Training Performance](#training-performance)
+        - [Evaluation Performance](#evaluation-performance)
+        - [Inference Performance](#inference-performance)
+    - [Usage](#usage)
+        - [Inference](#inference)
+        - [Continue Training on the Pretrained Model](#continue-training-on-the-pretrained-model)
+- [ModelZoo Homepage](#modelzoo-homepage)
+
+# CNN+CTC Description
+
+This paper describes three major contributions to scene text recognition (STR).
+It first examines the inconsistencies between training and evaluation datasets and the performance gap they cause.
+It then introduces a unified four-stage STR framework that most existing STR models fit into.
+This framework enables a broad evaluation of previously proposed STR modules and the discovery of previously unexplored module combinations.
+Third, it analyzes the contribution of each module to performance, including accuracy, speed, and memory demand, under consistent training and evaluation datasets.
+These analyses clear away the obstacles in current comparisons and help clarify the performance gains of existing modules.
+
+[Paper](https://arxiv.org/abs/1904.01906): J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S. J. Oh, and H. Lee, “What is wrong with scene text recognition model comparisons? dataset and model analysis,” ArXiv, vol. abs/1904.01906, 2019.
+
+# Model Architecture
+
+Example: train a CNN+CTC model for text recognition on the MJSynth and SynthText datasets with MindSpore.
+
+# Dataset
+
+The [MJSynth](https://www.robots.ox.ac.uk/~vgg/data/text/) and [SynthText](https://github.com/ankush-me/SynthText) datasets are used for model training. The [IIIT 5K-word dataset](https://cvit.iiit.ac.in/research/projects/cvit-projects/the-iiit-5k-word-dataset) is used for evaluation.
+
+- Step 1:
+
+All datasets have been preprocessed and stored in .lmdb format; click [**HERE**](https://drive.google.com/drive/folders/192UfE9agQUMNq6AgU3_E05_FcPZK4hyt) to download them.
+
+- Step 2:
+
+Uncompress the downloaded file and rename the MJSynth dataset to MJ, the SynthText dataset to ST, and the IIIT dataset to IIIT.
+
+- Step 3:
+
+Move the three datasets above into the `cnnctc_data` folder, with the following structure:
+
+```python
+|--- CNNCTC/
+    |--- cnnctc_data/
+        |--- ST/
+            data.mdb
+            lock.mdb
+        |--- MJ/
+            data.mdb
+            lock.mdb
+        |--- IIIT/
+            data.mdb
+            lock.mdb
+
+    ......
+```
+
+- Step 4:
+
+Preprocess the dataset:
+
+```shell
+python src/preprocess_dataset.py
+```
+
+This takes around 75 minutes.
+
+# Features
+
+## Mixed Precision
+
+[Mixed precision](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/enable_mixed_precision.html) training uses both single-precision and half-precision data to speed up the training of deep neural networks while keeping the accuracy achievable with pure single-precision training. It accelerates computation and reduces memory usage, and it supports training larger models or larger batch sizes on specific hardware.
+Taking FP16 operators as an example, if the input data type is FP32, the MindSpore backend automatically lowers the precision to process the data. Users can enable the INFO log and search for "reduce precision" to view the operators whose precision was reduced.
+
+# Environment Requirements
+
+- Hardware (Ascend)
+
+    - Prepare the hardware environment with Ascend or GPU processors.
+
+- Framework
+
+    - [MindSpore](https://www.mindspore.cn/install)
+
+- For more information, see the following resources:
+    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
+
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)
+
+# Quick Start
+
+- Install dependencies:
+
+```python
+pip install lmdb
+pip install Pillow
+pip install tqdm
+pip install six
+```
+
+```default_config.yaml
+
+TRAIN_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/ST-MJ/
+TRAIN_DATASET_INDEX_PATH: /home/DataSet/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl
+TEST_DATASET_PATH: /home/DataSet/MJ-ST-IIIT/IIIT5K_3000
+
+Modify these parameters according to the actual paths.
+```
+
+- Standalone training:
+
+```shell
+bash scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRETRAINED_CKPT(optional)]
+# example: bash scripts/run_standalone_train_ascend.sh 0
+```
+
+- Distributed training:
+
+```shell
+bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_CKPT(optional)]
+# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json
+```
+
+- Evaluation:
+
+```shell
+bash scripts/run_eval_ascend.sh DEVICE_ID TRAINED_CKPT
+# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt
+```
+
+# Script Description
+
+## Script and Sample Code
+
+The complete code structure is as follows:
+
+```python
+|--- CNNCTC/
+    |---README_CN.md    // descriptions about CNN+CTC
+    |---README.md    // descriptions about CNN+CTC
+    |---train.py    // training script
+    |---eval.py    // evaluation script
+    |---export.py    // model export script
+    |---postprocess.py    // inference post-processing script
+    |---preprocess.py    // inference pre-processing script
+    |---ascend310_infer    // for 310 inference
+    |---default_config.yaml    // parameter configuration
+    |---scripts
+        |---run_standalone_train_ascend.sh    // standalone Ascend shell script
+        |---run_distribute_train_ascend.sh    // distributed Ascend shell script
+        |---run_eval_ascend.sh    // Ascend evaluation shell script
+        |---run_infer_310.sh    // Ascend310 inference shell script
+    |---src
+        |---__init__.py    // init file
+        |---cnn_ctc.py    // cnn_ctc network
+        |---callback.py    // loss callback file
+        |---dataset.py    // dataset processing
+        |---util.py    // routine operations
+        |---generate_hccn_file.py    // generate the distributed json file
+        |---preprocess_dataset.py    // dataset preprocessing
+        |---model_utils
+            |---config.py    # parameter parsing
+            |---device_adapter.py    # device-related information
+            |---local_adapter.py    # device-related information
+            |---moxing_adapter.py    # decorators (mainly for ModelArts data copying)
+
+```
+
+## Script Parameters
+
+Both training and evaluation parameters can be configured in `default_config.yaml`.
+
+Arguments:
+
+- `--CHARACTER`: Character labels.
+- `--NUM_CLASS`: Number of classes, including all character labels and the CTCLoss label.
+- `--HIDDEN_SIZE`: Model hidden size.
+- `--FINAL_FEATURE_WIDTH`: Number of features.
+- `--IMG_H`: Input image height.
+- `--IMG_W`: Input image width.
+- `--TRAIN_DATASET_PATH`: Path to the training dataset.
+- `--TRAIN_DATASET_INDEX_PATH`: Path to the training dataset index file, which determines the data order.
+- `--TRAIN_BATCH_SIZE`: Training batch size. The batch size and index file must together ensure that input data has a fixed shape.
+- `--TRAIN_DATASET_SIZE`: Training dataset size.
+- `--TEST_DATASET_PATH`: Path to the test dataset.
+- `--TEST_BATCH_SIZE`: Test batch size.
+- `--TRAIN_EPOCHS`: Total training epochs.
+- `--CKPT_PATH`: Path to a model checkpoint file; can be used to resume training and for evaluation.
+- `--SAVE_PATH`: Path for saving model checkpoint files.
+- `--LR`: Learning rate for standalone training.
+- `--LR_PARA`: Learning rate for distributed training.
+- `--Momentum`: Momentum.
+- `--LOSS_SCALE`: Loss scale to prevent gradient underflow.
+- `--SAVE_CKPT_PER_N_STEP`: Save a model checkpoint file every N steps.
+- `--KEEP_CKPT_MAX_NUM`: Maximum number of saved model checkpoint files.
+
+## Training Process
+
+### Training
+
+- Standalone training:
+
+```shell
+bash scripts/run_standalone_train_ascend.sh [DEVICE_ID] [PRETRAINED_CKPT(optional)]
+# example: bash scripts/run_standalone_train_ascend.sh 0
+```
+
+Results and checkpoints are written to the `./train` folder. Logs can be found in `./train/log`, and loss values are recorded in `./train/loss.log`.
+
+`$PRETRAINED_CKPT` is the path to a model checkpoint and is **optional**. If it is none, the model is trained from scratch.
+
+- Distributed training:
+
+```shell
+bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_CKPT(optional)]
+# example: bash scripts/run_distribute_train_ascend.sh ~/hccl_8p.json
+```
+
+Results and checkpoints are written to the `./train_parallel_{i}` folder of device `i`.
+Logs can be found in `./train_parallel_{i}/log_{i}.log`, and loss values are recorded in `./train_parallel_{i}/loss.log`.
+
+`$RANK_TABLE_FILE` is required when running distributed tasks on Ascend.
+`$PATH_TO_CHECKPOINT` is the path to a model checkpoint and is **optional**. If it is none, the model is trained from scratch.
+
+> Note:
+
+  For reference material on RANK_TABLE_FILE, see this [link](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/distributed_training_ascend.html); for how to obtain device_ip, see this [link](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools).
+
+### Training Result
+
+Training results are saved in the example path, in folders whose names start with "train" or "train_parallel". You can find checkpoint files together with results in the logs under this path, as shown below.
+
+```python
+# distributed training result (8P)
+epoch: 1 step: 1 , loss is 76.25, average time per step is 0.335177839748392712
+epoch: 1 step: 2 , loss is 73.46875, average time per step is 0.36798572540283203
+epoch: 1 step: 3 , loss is 69.46875, average time per step is 0.3429678678512573
+epoch: 1 step: 4 , loss is 64.3125, average time per step is 0.33512671788533527
+epoch: 1 step: 5 , loss is 58.375, average time per step is 0.33149147033691406
+epoch: 1 step: 6 , loss is 52.7265625, average time per step is 0.3292975425720215
+...
+epoch: 1 step: 8689 , loss is 9.706798802612482, average time per step is 0.3184656601312549
+epoch: 1 step: 8690 , loss is 9.70612545289855, average time per step is 0.3184725407765116
+epoch: 1 step: 8691 , loss is 9.70695776049204, average time per step is 0.31847309686135555
+epoch: 1 step: 8692 , loss is 9.707279624277456, average time per step is 0.31847339290613375
+epoch: 1 step: 8693 , loss is 9.70763437950938, average time per step is 0.3184720295013031
+epoch: 1 step: 8694 , loss is 9.707695425072046, average time per step is 0.31847410284595573
+epoch: 1 step: 8695 , loss is 9.708408273381295, average time per step is 0.31847338271072345
+epoch: 1 step: 8696 , loss is 9.708703753591953, average time per step is 0.3184726025560777
+epoch: 1 step: 8697 , loss is 9.709536406025824, average time per step is 0.31847212061114694
+epoch: 1 step: 8698 , loss is 9.708542263610315, average time per step is 0.3184715309307257
+```
+
+## Evaluation Process
+
+### Evaluation
+
+- Evaluation:
+
+```shell
+bash scripts/run_eval_ascend.sh [DEVICE_ID] [TRAINED_CKPT]
+# example: scripts/run_eval_ascend.sh 0 /home/model/cnnctc/ckpt/CNNCTC-1_8000.ckpt
+```
+
+The model is evaluated on the IIIT dataset, and sample results and the overall accuracy are printed.
+
+- To train and run inference with the model on ModelArts, refer to the ModelArts [official guidance document](https://support.huaweicloud.com/modelarts/); the steps are as follows:
+
+```ModelArts
+# Example of distributed training on ModelArts:
+# Dataset storage layout
+
+# ├── CNNCTC_Data                       # dataset dir
+#    ├──train                           # train dir
+#      ├── ST_MJ                        # train dataset dir
+#        ├── data.mdb                   # data file
+#        ├── lock.mdb
+#      ├── st_mj_fixed_length_index_list.pkl
+#    ├── eval                           # eval dir
+#      ├── IIIT5K_3000                  # eval dataset dir
+#      ├── checkpoint                   # checkpoint dir
+
+# (1) Choose either a (modify the yaml file parameters) or b (create a ModelArts training job and modify parameters there).
+#       a. Set "enable_modelarts=True"
+#          Set "run_distribute=True"
+#          Set "TRAIN_DATASET_PATH=/cache/data/ST_MJ/"
+#          Set "TRAIN_DATASET_INDEX_PATH=/cache/data/st_mj_fixed_length_index_list.pkl"
+#          Set "SAVE_PATH=/cache/train/checkpoint"
+
增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 + +# (2)设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (3) 在modelarts的界面上设置代码的路径 "/path/cnnctc"。 +# (4) 在modelarts的界面上设置模型的启动文件 "train.py" 。 +# (5) 在modelarts的界面上设置模型的数据路径 ".../CNNCTC_Data/train"(选择CNNCTC_Data/train文件夹路径) , +# 模型的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +# (6) 开始模型的训练。 + +# 在modelarts上使用模型推理的示例 +# (1) 把训练好的模型地方到桶的对应位置。 +# (2) 选择a或者b其中一种方式。 +# a.设置 "enable_modelarts=True" +# 设置 "TEST_DATASET_PATH=/cache/data/IIIT5K_3000/" +# 设置 "CHECKPOINT_PATH=/cache/data/checkpoint/checkpoint file name" + +# b. 增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 + +# (3) 设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (4) 在modelarts的界面上设置代码的路径 "/path/cnnctc"。 +# (5) 在modelarts的界面上设置模型的启动文件 "eval.py" 。 +# (6) 在modelarts的界面上设置模型的数据路径 "../CNNCTC_Data/eval"(选择CNNCTC_Data/eval文件夹路径) , +# 模型的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +# (7) 开始模型的推理。 +``` + +## 推理过程 + +### 导出MindIR + +```shell +python export.py --ckpt_file [CKPT_PATH] --file_format [EXPORT_FORMAT] --TEST_BATCH_SIZE [BATCH_SIZE] +``` + +参数ckpt_file为必填项, +`EXPORT_FORMAT` 可选 ["AIR", "MINDIR"]. +`BATCH_SIZE` 目前仅支持batch_size为1的推理. + +- 在modelarts上导出MindIR + +```Modelarts +在ModelArts上导出MindIR示例 +数据集存放方式同Modelart训练 +# (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 +# a. 设置 "enable_modelarts=True" +# 设置 "file_name=cnnctc" +# 设置 "file_format=MINDIR" +# 设置 "ckpt_file=/cache/data/checkpoint file name" + +# b. 增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 +# (2)设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (3) 在modelarts的界面上设置代码的路径 "/path/cnnctc"。 +# (4) 在modelarts的界面上设置模型的启动文件 "export.py" 。 +# (5) 在modelarts的界面上设置模型的数据路径 ".../CNNCTC_Data/eval/checkpoint"(选择CNNCTC_Data/eval/checkpoint文件夹路径) , +# MindIR的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +``` + +### 在Ascend310执行推理 + +在执行推理前,mindir文件必须通过`export.py`脚本导出。以下展示了使用mindir模型执行推理的示例。 + +```shell +# Ascend310 inference +bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID] +``` + +- `DVPP` 为必填项,需要在["DVPP", "CPU"]选择,大小写均可。CNNCTC目前仅支持使用CPU算子进行推理。 +- `DEVICE_ID` 可选,默认值为0。 + +### 结果 + +推理结果保存在脚本执行的当前路径,你可以在acc.log中看到以下精度计算结果。 + +```bash +'Accuracy':0.8642 +``` + +# 模型描述 + +## 性能 + +### 训练性能 + +| 参数 | CNNCTC | +| -------------------------- | ----------------------------------------------------------- | +| 模型版本 | V1 | +| 资源 | Ascend 910;CPU 2.60GHz,192核;内存:755G | +| 上传日期 | 2020-09-28 | +| MindSpore版本 | 1.0.0 | +| 数据集 | MJSynth、SynthText | +| 训练参数 | epoch=3, batch_size=192 | +| 优化器 | RMSProp | +| 损失函数 | CTCLoss | +| 速度 | 1卡:300毫秒/步;8卡:310毫秒/步 | +| 总时间 | 1卡:18小时;8卡:2.3小时 | +| 参数(M) | 177 | +| 脚本 | | + +### 评估性能 + +| 参数 | CNNCTC | +| ------------------- | --------------------------- | +| 模型版本 | V1 | +| 资源 | Ascend 910 | +| 上传日期 | 2020-09-28 | +| MindSpore版本 | 1.0.0 | +| 数据集 | IIIT5K | +| batch_size | 192 | +| 输出 |准确率 | +| 准确率 | 85% | +| 推理模型 | 675M(.ckpt文件) | + +### 推理性能 + +| 参数 | Ascend | +| -------------- | ---------------------------| +| 模型版本 | CNNCTC | +| 资源 | Ascend 310;系统 CentOS 3.10 | +| 上传日期 | 2021-05-19 | +| MindSpore版本 | 1.2.0 | +| 数据集 | IIIT5K | +| batch_size | 1 | +| 输出 | Accuracy | +| 准确率 | Accuracy=0.8642 | +| 推理模型 | 675M(.ckpt文件) | + +## 用法 + +### 推理 + +如果您需要在GPU、Ascend 910、Ascend 
+
+- Running in the Ascend environment
+
+  ```python
+  # Set context
+  context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
+  context.set_context(device_id=cfg.device_id)
+
+  # Load unseen dataset for inference
+  dataset = dataset.create_dataset(cfg.data_path, 1, False)
+
+  # Define model
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
+                 cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
+
+  # Load the pretrained model
+  param_dict = load_checkpoint(cfg.checkpoint_path)
+  load_param_into_net(net, param_dict)
+  net.set_train(False)
+
+  # Make predictions on the unseen dataset
+  acc = model.eval(dataset)
+  print("accuracy: ", acc)
+  ```
+
+### Continue Training on the Pretrained Model
+
+- Running in the Ascend environment
+
+  ```python
+  # Load dataset
+  dataset = create_dataset(cfg.data_path, 1)
+  batch_num = dataset.get_dataset_size()
+
+  # Define model
+  net = CNNCTC(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
+  # Continue training if pre_trained is True
+  if cfg.pre_trained:
+      param_dict = load_checkpoint(cfg.checkpoint_path)
+      load_param_into_net(net, param_dict)
+  lr = lr_steps(0, lr_max=cfg.lr_init, total_epochs=cfg.epoch_size,
+                steps_per_epoch=batch_num)
+  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
+                 Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
+  loss = P.CTCLoss(preprocess_collapse_repeated=False,
+                   ctc_merge_repeated=True,
+                   ignore_longer_outputs_than_inputs=False)
+  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
+                amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
+
+  # Set callbacks
+  config_ck = CheckpointConfig(save_checkpoint_steps=batch_num * 5,
+                               keep_checkpoint_max=cfg.keep_checkpoint_max)
+  time_cb = TimeMonitor(data_size=batch_num)
+  ckpoint_cb = ModelCheckpoint(prefix="train_cnnctc", directory="./",
+                               config=config_ck)
+  loss_cb = LossMonitor()
+
+  # Start training
+  model.train(cfg.epoch_size, dataset, callbacks=[time_cb, ckpoint_cb, loss_cb])
+  print("train success")
+  ```
+
+# ModelZoo Homepage
+
+ Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py
new file mode 100644
index 0000000..d528938
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/eval.py
@@ -0,0 +1,111 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +"""cnnctc eval""" + +import time +import numpy as np +from mindspore import Tensor, context +import mindspore.common.dtype as mstype +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.dataset import GeneratorDataset +from src.util import CTCLabelConverter, AverageMeter +from src.dataset import iiit_generator_batch, adv_iiit_generator_batch +from src.cnn_ctc import CNNCTC +from src.model_utils.config import config +from src.model_utils.moxing_adapter import moxing_wrapper + +context.set_context(mode=context.GRAPH_MODE, save_graphs=False, save_graphs_path=".") + + +def test_dataset_creator(is_adv=False): + if is_adv: + ds = GeneratorDataset(adv_iiit_generator_batch(), ['img', 'label_indices', 'text', + 'sequence_length', 'label_str']) + + else: + ds = GeneratorDataset(iiit_generator_batch, ['img', 'label_indices', 'text', + 'sequence_length', 'label_str']) + return ds + + +@moxing_wrapper(pre_process=None) +def test(): + """Eval cnn-ctc model.""" + target = config.device_target + context.set_context(device_target=target) + + ds = test_dataset_creator(is_adv=config.IS_ADV) + + net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH) + + ckpt_path = config.CHECKPOINT_PATH + param_dict = load_checkpoint(ckpt_path) + load_param_into_net(net, param_dict) + print('parameters loaded! from: ', ckpt_path) + + converter = CTCLabelConverter(config.CHARACTER) + + model_run_time = AverageMeter() + npu_to_cpu_time = AverageMeter() + postprocess_time = AverageMeter() + + count = 0 + correct_count = 0 + for data in ds.create_tuple_iterator(): + img, _, text, _, length = data + + img_tensor = Tensor(img, mstype.float32) + + model_run_begin = time.time() + model_predict = net(img_tensor) + model_run_end = time.time() + model_run_time.update(model_run_end - model_run_begin) + + npu_to_cpu_begin = time.time() + model_predict = np.squeeze(model_predict.asnumpy()) + npu_to_cpu_end = time.time() + npu_to_cpu_time.update(npu_to_cpu_end - npu_to_cpu_begin) + + postprocess_begin = time.time() + preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE) + preds_index = np.argmax(model_predict, 2) + preds_index = np.reshape(preds_index, [-1]) + preds_str = converter.decode(preds_index, preds_size) + postprocess_end = time.time() + postprocess_time.update(postprocess_end - postprocess_begin) + + label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy()) + + if count == 0: + model_run_time.reset() + npu_to_cpu_time.reset() + postprocess_time.reset() + else: + print('---------model run time--------', model_run_time.avg) + print('---------npu_to_cpu run time--------', npu_to_cpu_time.avg) + print('---------postprocess run time--------', postprocess_time.avg) + + print("Prediction samples: \n", preds_str[:5]) + print("Ground truth: \n", label_str[:5]) + for pred, label in zip(preds_str, label_str): + if pred == label: + correct_count += 1 + count += 1 + print(count) + print('accuracy: ', correct_count / count) + + +if __name__ == '__main__': + test() diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py new file mode 100644 index 0000000..065f4d1 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/export.py @@ -0,0 +1,51 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Export a checkpoint file into an air or mindir model.
+   Suggested usage: python export.py --file_name cnnctc --file_format MINDIR --ckpt_file [ckpt file path]
+"""
+import os
+import numpy as np
+from mindspore import Tensor, context, load_checkpoint, export
+import mindspore.common.dtype as mstype
+from src.cnn_ctc import CNNCTC
+from src.model_utils.config import config
+from src.model_utils.moxing_adapter import moxing_wrapper
+
+
+context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
+if config.device_target == "Ascend":
+    context.set_context(device_id=config.device_id)
+
+
+def modelarts_pre_process():
+    config.file_name = os.path.join(config.output_path, config.file_name)
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def model_export():
+    """Export model."""
+    net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+
+    load_checkpoint(config.ckpt_file, net=net)
+
+    bs = config.TEST_BATCH_SIZE
+
+    input_data = Tensor(np.zeros([bs, 3, config.IMG_H, config.IMG_W]), mstype.float32)
+
+    export(net, input_data, file_name=config.file_name, file_format=config.file_format)
+
+
+if __name__ == '__main__':
+    model_export()
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py
new file mode 100644
index 0000000..2fc1a4a
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/mindspore_hub_conf.py
@@ -0,0 +1,30 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""hub config"""
+from src.cnn_ctc import CNNCTC
+from src.model_utils.config import config
+
+
+def cnnctc_net(*args, **kwargs):
+    return CNNCTC(*args, **kwargs)
+
+
+def create_network(name, *args, **kwargs):
+    """
+    create cnnctc network
+    """
+    if name == "cnnctc":
+        return cnnctc_net(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH, *args, **kwargs)
+    raise NotImplementedError(f"{name} is not implemented in the repo")
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py
new file mode 100644
index 0000000..29417ce
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/postprocess.py
@@ -0,0 +1,54 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""post process for 310 inference"""
+import os
+import numpy as np
+from src.model_utils.config import config
+from src.util import CTCLabelConverter
+
+
+def calcul_acc(labels, preds):
+    return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels)
+
+
+def get_result(result_path, label_path):
+    """Compute accuracy from the binary outputs produced by 310 inference."""
+    converter = CTCLabelConverter(config.CHARACTER)
+    files = os.listdir(result_path)
+    preds = []
+    labels = []
+    label_dict = {}
+    with open(label_path, 'r') as f:
+        lines = f.readlines()
+        for line in lines:
+            label_dict[line.split(',')[0]] = line.split(',')[1].replace('\n', '')
+    for file in files:
+        file_name = file.split('.')[0]
+        label = label_dict[file_name]
+        labels.append(label)
+        new_result_path = os.path.join(result_path, file)
+        output = np.fromfile(new_result_path, dtype=np.float32)
+        output = np.reshape(output, (config.FINAL_FEATURE_WIDTH, config.NUM_CLASS))
+        model_predict = np.squeeze(output)
+        preds_size = np.array([model_predict.shape[0]] * 1)
+        preds_index = np.argmax(model_predict, axis=1)
+        preds_str = converter.decode(preds_index, preds_size)
+        preds.append(preds_str[0])
+    acc = calcul_acc(labels, preds)
+    print("Total data: {}, accuracy: {}".format(len(labels), acc))
+
+
+if __name__ == '__main__':
+    get_result(config.result_path, config.label_path)
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py
new file mode 100644
index 0000000..4abdeea
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/preprocess.py
@@ -0,0 +1,96 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Preprocess for 310 inference: dump test images and labels from lmdb."""
+import os
+import sys
+import six
+import lmdb
+from PIL import Image
+from src.model_utils.config import config
+from src.util import CTCLabelConverter
+
+
+def get_img_from_lmdb(env_, ind):
+    """Get image from lmdb."""
+    with env_.begin(write=False) as txn_:
+        label_key = 'label-%09d'.encode() % ind
+        label_ = txn_.get(label_key).decode('utf-8')
+        img_key = 'image-%09d'.encode() % ind
+        imgbuf = txn_.get(img_key)
+
+        buf = six.BytesIO()
+        buf.write(imgbuf)
+        buf.seek(0)
+        try:
+            img_ = Image.open(buf).convert('RGB')  # for color image
+        except IOError:
+            print(f'Corrupted image for {ind}')
+            # make dummy image and dummy label for corrupted image.
+            img_ = Image.new('RGB', (config.IMG_W, config.IMG_H))
+            label_ = '[dummy_label]'
+
+    label_ = label_.lower()
+
+    return img_, label_
+
+
+if __name__ == '__main__':
+    max_len = int((26 + 1) // 2)
+    converter = CTCLabelConverter(config.CHARACTER)
+    env = lmdb.open(config.TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+    if not env:
+        print('cannot create lmdb from %s' % (config.TEST_DATASET_PATH))
+        sys.exit(1)  # exit non-zero on failure
+
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        # Filtering
+        filtered_index_list = []
+        for index_ in range(n_samples):
+            index_ += 1  # lmdb starts with 1
+            label_key_ = 'label-%09d'.encode() % index_
+            label = txn.get(label_key_).decode('utf-8')
+
+            if len(label) > max_len:
+                continue
+
+            illegal_sample = False
+            for char_item in label.lower():
+                if char_item not in config.CHARACTER:
+                    illegal_sample = True
+                    break
+            if illegal_sample:
+                continue
+
+            filtered_index_list.append(index_)
+
+        print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')
+        i = 0
+        label_dict = {}
+        for index in filtered_index_list:
+            img, label = get_img_from_lmdb(env, index)
+            img_name = os.path.join(config.preprocess_output, str(i) + ".png")
+            img.save(img_name)
+            label_dict[str(i)] = label
+            i += 1
+        with open('./label.txt', 'w') as file:
+            for k, v in label_dict.items():
+                file.write(str(k) + ',' + str(v) + '\n')
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt b/examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt
new file mode 100644
index 0000000..2830093
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/requirements.txt
@@ -0,0 +1,7 @@
+lmdb
+tqdm
+six
+numpy
+pillow
+pyyaml
+
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh
new file mode 100644
index 0000000..1572c50
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_ascend.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
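preprocess.py, dataset.py and the evaluation scripts all read the same LMDB layout: a `num-samples` counter plus 1-indexed `label-%09d`/`image-%09d` entries. A toy fixture like the following (path and image bytes are placeholders) is enough to exercise the readers above without downloading the real datasets:

```python
# Build a two-sample toy LMDB with the key scheme this repo expects.
import lmdb

env = lmdb.open("/tmp/toy_lmdb", map_size=1 << 24)    # hypothetical path
with env.begin(write=True) as txn:
    txn.put(b"num-samples", b"2")
    for i, (label, png) in enumerate([(b"hello", b"<png bytes>"),
                                      (b"world", b"<png bytes>")], start=1):
        txn.put(b"label-%09d" % i, label)             # keys are 1-indexed
        txn.put(b"image-%09d" % i, png)               # raw encoded image bytes

with env.begin(write=False) as txn:
    print(int(txn.get(b"num-samples")),
          txn.get(b"label-%09d" % 1).decode("utf-8"))
```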
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ $# -ne 2 ] +then + echo "Usage: sh scripts/run_eval_ascend.sh [DEVICE_ID] [TRAINED_CKPT]" +exit 1 +fi + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} + +PATH1=$(get_real_path $2) +echo $PATH1 +if [ ! -f $PATH1 ] +then + echo "error: TRAINED_CKPT=$PATH1 is not a file" +exit 1 +fi + +ulimit -u unlimited +export DEVICE_ID=$1 + +if [ -d "eval" ]; +then + rm -rf ./eval +fi +mkdir ./eval +echo "start inferring for device $DEVICE_ID" +env > ./eval/env.log +python eval.py --CHECKPOINT_PATH=$PATH1 &> ./eval/log & +#cd .. || exit diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh new file mode 100644 index 0000000..cb6ff79 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_eval_gpu.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ $# -ne 1 ] +then + echo "Usage: sh run_eval_gpu.sh [TRAINED_CKPT]" +exit 1 +fi + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} + +PATH1=$(get_real_path $1) +echo $PATH1 +if [ ! -f $PATH1 ] +then + echo "error: TRAINED_CKPT=$PATH1 is not a file" +exit 1 +fi + +#ulimit -u unlimited +export DEVICE_ID=0 + +if [ -d "eval" ]; +then + rm -rf ./eval +fi +mkdir ./eval +echo "start inferring for device $DEVICE_ID" +env > ./eval/env.log +python eval.py --device_target="GPU" --device_id=$DEVICE_ID --CHECKPOINT_PATH=$PATH1 &> ./eval/log & +#cd .. || exit diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh new file mode 100644 index 0000000..f9197f5 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_ascend.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
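Stripped of its scaffolding, run_eval_gpu.sh boils down to resolving the checkpoint path, fixing the device, and launching eval.py with its output captured. A hedged Python equivalent, with placeholder paths:

```python
# What the eval launcher does, minus the shell plumbing; paths are placeholders.
import os
import subprocess

ckpt = os.path.realpath("ckpt/cnnctc.ckpt")      # get_real_path equivalent
os.makedirs("eval", exist_ok=True)
with open("eval/log", "w") as log:
    subprocess.run(
        ["python", "eval.py", "--device_target=GPU", "--device_id=0",
         f"--CHECKPOINT_PATH={ckpt}"],
        stdout=log, stderr=subprocess.STDOUT, check=True)
```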
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ $# != 1 ] && [ $# != 2 ] +then + echo "run as sh scripts/run_standalone_train_ascend.sh DEVICE_ID PRE_TRAINED(options)" +exit 1 +fi + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} +PATH1=$(get_real_path $2) + +export DEVICE_ID=$1 + +ulimit -u unlimited + +if [ -d "train" ]; +then + rm -rf ./train +fi +mkdir ./train +echo "start training for device $DEVICE_ID" +env > env.log +if [ -f $PATH1 ] +then + python train.py --PRED_TRAINED=$PATH1 --run_distribute=False &> log & +else + python train.py --run_distribute=False &> log & +fi +cd .. || exit diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh new file mode 100644 index 0000000..c11410b --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/scripts/run_standalone_train_gpu.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} +PATH1=$(get_real_path $1) +echo $PATH1 + +export DEVICE_NUM=1 +export RANK_SIZE=1 + +if [ -d "train" ]; +then + rm -rf ./train +fi +mkdir ./train +env > ./train/env.log +if [ -f $PATH1 ] +then + python train.py --device_target="GPU" --PRED_TRAINED=$PATH1 --run_distribute=False &> log & +else + python train.py --device_target="GPU" --run_distribute=False &> ./train/log & +fi +#cd .. || exit diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py new file mode 100644 index 0000000..8d62ac3 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""src init file""" diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py new file mode 100644 index 0000000..9b048d1 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/callback.py @@ -0,0 +1,73 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""loss callback""" + +import time +import numpy as np +from mindspore.train.callback import Callback +from .util import AverageMeter + +class LossCallBack(Callback): + """ + Monitor the loss in training. + + If the loss is NAN or INF terminating training. + + Note: + If per_print_times is 0 do not print loss. + + Args: + per_print_times (int): Print loss every times. Default: 1. + """ + + def __init__(self, per_print_times=1): + super(LossCallBack, self).__init__() + if not isinstance(per_print_times, int) or per_print_times < 0: + raise ValueError("print_step must be int and >= 0.") + self._per_print_times = per_print_times + self.loss_avg = AverageMeter() + self.timer = AverageMeter() + self.start_time = time.time() + + def step_end(self, run_context): + """step end.""" + cb_params = run_context.original_args() + + loss = np.array(cb_params.net_outputs) + + cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1 + cur_num = cb_params.cur_step_num + + if cur_step_in_epoch % 2000 == 1: + self.loss_avg = AverageMeter() + self.timer = AverageMeter() + self.start_time = time.time() + else: + self.timer.update(time.time() - self.start_time) + self.start_time = time.time() + + self.loss_avg.update(loss) + + if self._per_print_times != 0 and cur_num % self._per_print_times == 0: + loss_file = open("./loss.log", "a+") + loss_file.write("epoch: %s step: %s , loss is %s, average time per step is %s" % ( + cb_params.cur_epoch_num, cur_step_in_epoch, + self.loss_avg.avg, self.timer.avg)) + loss_file.write("\n") + loss_file.close() + + print("epoch: %s step: %s , loss is %s, average time per step is %s" % ( + cb_params.cur_epoch_num, cur_step_in_epoch, + self.loss_avg.avg, self.timer.avg)) diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py new file mode 100644 index 0000000..2d1ea28 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/cnn_ctc.py @@ -0,0 +1,389 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
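Note that LossCallBack reports windowed statistics rather than running ones: both meters are rebuilt whenever `cur_step_in_epoch % 2000 == 1`, so the printed loss and step time average over at most the last 2000 steps. A pure-Python sketch of that bookkeeping (the meter is re-declared here so the example stands alone):

```python
# Windowed averaging as done by LossCallBack.step_end.
class AverageMeter:
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

meter = AverageMeter()
for step, loss in enumerate([4.0, 3.5, 3.2, 3.0], start=1):
    if step % 2000 == 1:      # same reset rule as LossCallBack
        meter = AverageMeter()
    meter.update(loss)
print(round(meter.avg, 3))    # 3.425: average over the current window only
```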
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""cnn_ctc network define""" + +import mindspore.common.dtype as mstype +import mindspore.nn as nn +from mindspore import Tensor, Parameter, ParameterTuple, context +from mindspore.common.initializer import TruncatedNormal, initializer +from mindspore.communication.management import get_group_size +from mindspore.context import ParallelMode +from mindspore.nn.wrap.grad_reducer import DistributedGradReducer +from mindspore.ops import composite as C +from mindspore.ops import functional as F +from mindspore.ops import operations as P + +grad_scale = C.MultitypeFuncGraph("grad_scale") +reciprocal = P.Reciprocal() + + +@grad_scale.register("Tensor", "Tensor") +def tensor_grad_scale(scale, grad): + return grad * F.cast(reciprocal(scale), F.dtype(grad)) + + +_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") +grad_overflow = P.FloatStatus() + + +@_grad_overflow.register("Tensor") +def _tensor_grad_overflow(grad): + return grad_overflow(grad) + + +GRADIENT_CLIP_MIN = -64000 +GRADIENT_CLIP_MAX = 64000 + + +class ClipGradients(nn.Cell): + """ + Clip large gradients, typically generated from overflow. + """ + + def __init__(self): + super(ClipGradients, self).__init__() + self.clip_by_norm = nn.ClipByNorm() + self.cast = P.Cast() + self.dtype = P.DType() + + def construct(self, grads, clip_min, clip_max): + new_grads = () + for grad in grads: + dt = self.dtype(grad) + + t = C.clip_by_value(grad, self.cast(F.tuple_to_array((clip_min,)), dt), + self.cast(F.tuple_to_array((clip_max,)), dt)) + t = self.cast(t, dt) + new_grads = new_grads + (t,) + return new_grads + + +class CNNCTCTrainOneStepWithLossScaleCell(nn.Cell): + """ + Encapsulation class of CNNCTC network training. + Used for GPU training in order to manage overflowing gradients. + Args: + network (Cell): The training network. Note that loss function should have been added. + optimizer (Optimizer): Optimizer for updating the weights. + scale_sense (Cell): Loss scaling value. 
+ """ + + def __init__(self, network, optimizer, scale_sense): + super(CNNCTCTrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False) + self.network = network + self.optimizer = optimizer + + if isinstance(scale_sense, nn.Cell): + self.loss_scaling_manager = scale_sense + self.scale_sense = Parameter(Tensor(scale_sense.get_loss_scale(), + dtype=mstype.float32), name="scale_sense") + elif isinstance(scale_sense, Tensor): + if scale_sense.shape == (1,) or scale_sense.shape == (): + self.scale_sense = Parameter(scale_sense, name='scale_sense') + else: + raise ValueError("The shape of scale_sense must be (1,) or (), but got {}".format( + scale_sense.shape)) + else: + raise TypeError("The scale_sense must be Cell or Tensor, but got {}".format( + type(scale_sense))) + + self.network.set_grad() + self.weights = ParameterTuple(network.trainable_params()) + + self.grad = C.GradOperation(get_by_list=True, + sens_param=True) + + self.reducer_flag = False + self.parallel_mode = context.get_auto_parallel_context("parallel_mode") + if self.parallel_mode not in ParallelMode.MODE_LIST: + raise ValueError("Parallel mode does not support: ", self.parallel_mode) + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: + self.reducer_flag = True + self.grad_reducer = None + if self.reducer_flag: + mean = context.get_auto_parallel_context("gradients_mean") + degree = get_group_size() + self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree) + self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) + + self.clip_gradients = ClipGradients() + self.cast = P.Cast() + self.addn = P.AddN() + self.reshape = P.Reshape() + self.hyper_map = C.HyperMap() + self.less_equal = P.LessEqual() + self.allreduce = P.AllReduce() + + def construct(self, img, label_indices, text, sequence_length): + """model construct.""" + weights = self.weights + loss = self.network(img, label_indices, text, sequence_length) + + scaling_sens = self.scale_sense + + grads = self.grad(self.network, weights)(img, label_indices, text, sequence_length, + self.cast(scaling_sens, mstype.float32)) + + grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads) + grads = self.clip_gradients(grads, GRADIENT_CLIP_MIN, GRADIENT_CLIP_MAX) + + if self.reducer_flag: + # apply grad reducer on grads + grads = self.grad_reducer(grads) + + self.optimizer(grads) + return (loss, scaling_sens) + + +class CNNCTC(nn.Cell): + """CNNCTC model construct.""" + def __init__(self, num_class, hidden_size, final_feature_width): + super(CNNCTC, self).__init__() + + self.num_class = num_class + self.hidden_size = hidden_size + self.final_feature_width = final_feature_width + + self.feature_extraction = ResNetFeatureExtractor() + self.prediction = nn.Dense(self.hidden_size, self.num_class) + + self.transpose = P.Transpose() + self.reshape = P.Reshape() + + def construct(self, x): + x = self.feature_extraction(x) + x = self.transpose(x, (0, 3, 1, 2)) # [b, c, h, w] -> [b, w, c, h] + + x = self.reshape(x, (-1, self.hidden_size)) + x = self.prediction(x) + x = self.reshape(x, (-1, self.final_feature_width, self.num_class)) + + return x + + +class WithLossCell(nn.Cell): + """Add loss cell for network.""" + def __init__(self, backbone, loss_fn): + super(WithLossCell, self).__init__(auto_prefix=False) + self._backbone = backbone + self._loss_fn = loss_fn + + def construct(self, img, label_indices, text, sequence_length): + model_predict = self._backbone(img) + return self._loss_fn(model_predict, 
label_indices, text, sequence_length) + + @property + def backbone_network(self): + return self._backbone + + +class CTCLoss(nn.Cell): + """Loss of CTC.""" + def __init__(self): + super(CTCLoss, self).__init__() + + self.loss = P.CTCLoss(preprocess_collapse_repeated=False, + ctc_merge_repeated=True, + ignore_longer_outputs_than_inputs=False) + + self.mean = P.ReduceMean() + self.transpose = P.Transpose() + self.reshape = P.Reshape() + + def construct(self, inputs, labels_indices, labels_values, sequence_length): + inputs = self.transpose(inputs, (1, 0, 2)) + + loss, _ = self.loss(inputs, labels_indices, labels_values, sequence_length) + + loss = self.mean(loss) + return loss + + +class ResNetFeatureExtractor(nn.Cell): + """Extractor of ResNet feature.""" + def __init__(self): + super(ResNetFeatureExtractor, self).__init__() + self.conv_net = ResNet(3, 512, BasicBlock, [1, 2, 5, 3]) + + def construct(self, feature_map): + return self.conv_net(feature_map) + + +class ResNet(nn.Cell): + """Network of ResNet.""" + def __init__(self, input_channel, output_channel, block, layers): + super(ResNet, self).__init__() + + self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel] + + self.inplanes = int(output_channel / 8) + self.conv0_1 = ms_conv3x3(input_channel, int(output_channel / 16), stride=1, padding=1, pad_mode='pad') + self.bn0_1 = ms_fused_bn(int(output_channel / 16)) + self.conv0_2 = ms_conv3x3(int(output_channel / 16), self.inplanes, stride=1, padding=1, pad_mode='pad') + self.bn0_2 = ms_fused_bn(self.inplanes) + self.relu = P.ReLU() + + self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='valid') + self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0]) + self.conv1 = ms_conv3x3(self.output_channel_block[0], self.output_channel_block[0], stride=1, padding=1, + pad_mode='pad') + self.bn1 = ms_fused_bn(self.output_channel_block[0]) + + self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='valid') + self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1]) + self.conv2 = ms_conv3x3(self.output_channel_block[1], self.output_channel_block[1], stride=1, padding=1, + pad_mode='pad') + self.bn2 = ms_fused_bn(self.output_channel_block[1]) + + self.pad = P.Pad(((0, 0), (0, 0), (0, 0), (2, 2))) + self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), pad_mode='valid') + self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2]) + self.conv3 = ms_conv3x3(self.output_channel_block[2], self.output_channel_block[2], stride=1, padding=1, + pad_mode='pad') + self.bn3 = ms_fused_bn(self.output_channel_block[2]) + + self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3]) + self.conv4_1 = ms_conv2x2(self.output_channel_block[3], self.output_channel_block[3], stride=(2, 1), + pad_mode='valid') + self.bn4_1 = ms_fused_bn(self.output_channel_block[3]) + + self.conv4_2 = ms_conv2x2(self.output_channel_block[3], self.output_channel_block[3], stride=1, padding=0, + pad_mode='valid') + self.bn4_2 = ms_fused_bn(self.output_channel_block[3]) + + def _make_layer(self, block, planes, blocks, stride=1): + """make layer""" + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.SequentialCell( + [ms_conv1x1(self.inplanes, planes * block.expansion, stride=stride), + ms_fused_bn(planes * block.expansion)] + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * 
block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.SequentialCell(layers) + + def construct(self, x): + """model construct""" + x = self.conv0_1(x) + x = self.bn0_1(x) + x = self.relu(x) + x = self.conv0_2(x) + x = self.bn0_2(x) + x = self.relu(x) + + x = self.maxpool1(x) + x = self.layer1(x) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.maxpool2(x) + x = self.layer2(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.maxpool3(x) + x = self.layer3(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.layer4(x) + x = self.pad(x) + x = self.conv4_1(x) + x = self.bn4_1(x) + x = self.relu(x) + x = self.conv4_2(x) + x = self.bn4_2(x) + x = self.relu(x) + + return x + + +class BasicBlock(nn.Cell): + """BasicBlock""" + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + + self.conv1 = ms_conv3x3(inplanes, planes, stride=stride, padding=1, pad_mode='pad') + self.bn1 = ms_fused_bn(planes) + self.conv2 = ms_conv3x3(planes, planes, stride=stride, padding=1, pad_mode='pad') + self.bn2 = ms_fused_bn(planes) + self.relu = P.ReLU() + self.downsample = downsample + self.add = P.Add() + + def construct(self, x): + """Basic block construct""" + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + out = self.add(out, residual) + out = self.relu(out) + + return out + + +def weight_variable(shape, half_precision=False): + if half_precision: + return initializer(TruncatedNormal(0.02), shape, dtype=mstype.float16) + + return TruncatedNormal(0.02) + + +def ms_conv3x3(in_channels, out_channels, stride=1, padding=0, pad_mode='same', has_bias=False): + """Get a conv2d layer with 3x3 kernel size.""" + init_value = weight_variable((out_channels, in_channels, 3, 3)) + return nn.Conv2d(in_channels, out_channels, + kernel_size=3, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value, + has_bias=has_bias) + + +def ms_conv1x1(in_channels, out_channels, stride=1, padding=0, pad_mode='same', has_bias=False): + """Get a conv2d layer with 1x1 kernel size.""" + init_value = weight_variable((out_channels, in_channels, 1, 1)) + return nn.Conv2d(in_channels, out_channels, + kernel_size=1, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value, + has_bias=has_bias) + + +def ms_conv2x2(in_channels, out_channels, stride=1, padding=0, pad_mode='same', has_bias=False): + """Get a conv2d layer with 2x2 kernel size.""" + init_value = weight_variable((out_channels, in_channels, 1, 1)) + return nn.Conv2d(in_channels, out_channels, + kernel_size=2, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value, + has_bias=has_bias) + + +def ms_fused_bn(channels, momentum=0.1): + """Get a fused batchnorm""" + return nn.BatchNorm2d(channels, momentum=momentum) diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py new file mode 100644 index 0000000..3d78c89 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/dataset.py @@ -0,0 +1,343 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
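The pooling and stride plan above is what turns the default 32x100 crop into a 1x26 feature map, i.e. FINAL_FEATURE_WIDTH = 26 time steps for the CTC head. A back-of-the-envelope trace, assuming the default IMG_H=32 and IMG_W=100 and using out = (in - k) // s + 1 for the 'valid' pools and 2x2 convs (the 3x3/pad-1/stride-1 convs and BasicBlocks preserve shape):

```python
# Trace (H, W) through ResNetFeatureExtractor for a 32x100 input.
def pool(hw, k, s):
    return tuple((d - k) // st + 1 for d, st in zip(hw, s))

h, w = 32, 100                   # conv0_x: 3x3, pad 1, stride 1 -> unchanged
h, w = pool((h, w), 2, (2, 2))   # maxpool1 -> 16x50
h, w = pool((h, w), 2, (2, 2))   # maxpool2 -> 8x25
h, w = pool((h, w), 2, (2, 1))   # maxpool3, stride (2,1) -> 4x24
w += 4                           # P.Pad adds 2 columns per side -> 4x28
h, w = pool((h, w), 2, (2, 1))   # conv4_1, kernel 2, stride (2,1) -> 2x27
h, w = pool((h, w), 2, (1, 1))   # conv4_2, kernel 2, stride 1 -> 1x26
print(h, w)                      # (1, 26): 26 time steps for CTC
```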
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""cnn_ctc dataset""" + +import sys +import pickle +import math +import six +import numpy as np +from PIL import Image +import lmdb +from mindspore.communication.management import get_rank, get_group_size +from src.model_utils.config import config +from src.util import CTCLabelConverter + + +class NormalizePAD: + """Normalize pad.""" + + def __init__(self, max_size, pad_type='right'): + self.max_size = max_size + self.pad_type = pad_type + + def __call__(self, img): + # toTensor + img = np.array(img, dtype=np.float32) + # normalize + means = [121.58949, 123.93914, 123.418655] + stds = [65.70353, 65.142426, 68.61079] + img = np.subtract(img, means) + img = np.true_divide(img, stds) + + img = img.transpose([2, 0, 1]) + img = img.astype(np.float) + + _, _, w = img.shape + pad_img = np.zeros(shape=self.max_size, dtype=np.float32) + pad_img[:, :, :w] = img # right pad + if self.max_size[2] != w: # add border Pad + pad_img[:, :, w:] = np.tile(np.expand_dims(img[:, :, w - 1], 2), (1, 1, self.max_size[2] - w)) + + return pad_img + + +class AlignCollate: + """Align collate""" + + def __init__(self, img_h=32, img_w=100): + self.img_h = img_h + self.img_w = img_w + + def __call__(self, images): + + resized_max_w = self.img_w + input_channel = 3 + transform = NormalizePAD((input_channel, self.img_h, resized_max_w)) + + resized_images = [] + for image in images: + w, h = image.size + ratio = w / float(h) + if math.ceil(self.img_h * ratio) > self.img_w: + resized_w = self.img_w + else: + resized_w = math.ceil(self.img_h * ratio) + + resized_image = image.resize((resized_w, self.img_h), Image.BICUBIC) + resized_images.append(transform(resized_image)) + + image_tensors = np.concatenate([np.expand_dims(t, 0) for t in resized_images], 0) + + return image_tensors + + +def get_img_from_lmdb(env, index, is_adv=False): + """get image from lmdb.""" + with env.begin(write=False) as txn: + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + if is_adv: + img_key = 'adv_image-%09d'.encode() % index + else: + img_key = 'image-%09d'.encode() % index + imgbuf = txn.get(img_key) + + buf = six.BytesIO() + buf.write(imgbuf) + buf.seek(0) + try: + img = Image.open(buf).convert('RGB') # for color image + + except IOError: + print(f'Corrupted image for {index}') + # make dummy image and dummy label for corrupted image. 
+ img = Image.new('RGB', (config.IMG_W, config.IMG_H)) + label = '[dummy_label]' + + label = label.lower() + + return img, label + + +class STMJGeneratorBatchFixedLength: + """ST_MJ Generator with Batch Fixed Length""" + + def __init__(self): + self.align_collector = AlignCollate() + self.converter = CTCLabelConverter(config.CHARACTER) + self.env = lmdb.open(config.TRAIN_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, + meminit=False) + if not self.env: + print('cannot create lmdb from %s' % (config.TRAIN_DATASET_PATH)) + raise ValueError(config.TRAIN_DATASET_PATH) + + with open(config.TRAIN_DATASET_INDEX_PATH, 'rb') as f: + self.st_mj_filtered_index_list = pickle.load(f) + + print(f'num of samples in ST_MJ dataset: {len(self.st_mj_filtered_index_list)}') + self.dataset_size = len(self.st_mj_filtered_index_list) // config.TRAIN_BATCH_SIZE + self.batch_size = config.TRAIN_BATCH_SIZE + + def __len__(self): + return self.dataset_size + + def __getitem__(self, item): + img_ret = [] + text_ret = [] + + for i in range(item * self.batch_size, (item + 1) * self.batch_size): + index = self.st_mj_filtered_index_list[i] + img, label = get_img_from_lmdb(self.env, index) + + img_ret.append(img) + text_ret.append(label) + + img_ret = self.align_collector(img_ret) + text_ret, length = self.converter.encode(text_ret) + + label_indices = [] + for i, _ in enumerate(length): + for j in range(length[i]): + label_indices.append((i, j)) + label_indices = np.array(label_indices, np.int64) + sequence_length = np.array([config.FINAL_FEATURE_WIDTH] * config.TRAIN_BATCH_SIZE, dtype=np.int32) + text_ret = text_ret.astype(np.int32) + + return img_ret, label_indices, text_ret, sequence_length + + +class STMJGeneratorBatchFixedLengthPara: + """ST_MJ Generator with batch fixed length Para""" + + def __init__(self): + self.align_collector = AlignCollate() + self.converter = CTCLabelConverter(config.CHARACTER) + self.env = lmdb.open(config.TRAIN_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, + meminit=False) + if not self.env: + print('cannot create lmdb from %s' % (config.TRAIN_DATASET_PATH)) + raise ValueError(config.TRAIN_DATASET_PATH) + + with open(config.TRAIN_DATASET_INDEX_PATH, 'rb') as f: + self.st_mj_filtered_index_list = pickle.load(f) + + print(f'num of samples in ST_MJ dataset: {len(self.st_mj_filtered_index_list)}') + self.rank_id = get_rank() + self.rank_size = get_group_size() + self.dataset_size = len(self.st_mj_filtered_index_list) // config.TRAIN_BATCH_SIZE // self.rank_size + self.batch_size = config.TRAIN_BATCH_SIZE + + def __len__(self): + return self.dataset_size + + def __getitem__(self, item): + img_ret = [] + text_ret = [] + + rank_item = (item * self.rank_size) + self.rank_id + for i in range(rank_item * self.batch_size, (rank_item + 1) * self.batch_size): + index = self.st_mj_filtered_index_list[i] + img, label = get_img_from_lmdb(self.env, index) + + img_ret.append(img) + text_ret.append(label) + + img_ret = self.align_collector(img_ret) + text_ret, length = self.converter.encode(text_ret) + + label_indices = [] + for i, _ in enumerate(length): + for j in range(length[i]): + label_indices.append((i, j)) + label_indices = np.array(label_indices, np.int64) + sequence_length = np.array([config.FINAL_FEATURE_WIDTH] * config.TRAIN_BATCH_SIZE, dtype=np.int32) + text_ret = text_ret.astype(np.int32) + + return img_ret, label_indices, text_ret, sequence_length + + +def iiit_generator_batch(): + """IIIT dataset generator""" + max_len = int((26 + 1) // 
2) + + align_collector = AlignCollate() + + converter = CTCLabelConverter(config.CHARACTER) + + env = lmdb.open(config.TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + if not env: + print('cannot create lmdb from %s' % (config.TEST_DATASET_PATH)) + sys.exit(0) + + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + n_samples = n_samples + + # Filtering + filtered_index_list = [] + for index in range(n_samples): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + + if len(label) > max_len: + continue + + illegal_sample = False + for char_item in label.lower(): + if char_item not in config.CHARACTER: + illegal_sample = True + break + if illegal_sample: + continue + + filtered_index_list.append(index) + + img_ret = [] + text_ret = [] + + print(f'num of samples in IIIT dataset: {len(filtered_index_list)}') + + for index in filtered_index_list: + img, label = get_img_from_lmdb(env, index, config.IS_ADV) + + img_ret.append(img) + text_ret.append(label) + + if len(img_ret) == config.TEST_BATCH_SIZE: + img_ret = align_collector(img_ret) + text_ret, length = converter.encode(text_ret) + + label_indices = [] + for i, _ in enumerate(length): + for j in range(length[i]): + label_indices.append((i, j)) + label_indices = np.array(label_indices, np.int64) + sequence_length = np.array([26] * config.TEST_BATCH_SIZE, dtype=np.int32) + text_ret = text_ret.astype(np.int32) + + yield img_ret, label_indices, text_ret, sequence_length, length + # return img_ret, label_indices, text_ret, sequence_length, length + + img_ret = [] + text_ret = [] + + +def adv_iiit_generator_batch(): + """Perturb IIII dataset generator.""" + max_len = int((26 + 1) // 2) + + align_collector = AlignCollate() + + converter = CTCLabelConverter(config.CHARACTER) + + env = lmdb.open(config.ADV_TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, + meminit=False) + if not env: + print('cannot create lmdb from %s' % (config.ADV_TEST_DATASET_PATH)) + sys.exit(0) + + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + n_samples = n_samples + + # Filtering + filtered_index_list = [] + for index in range(n_samples): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + + if len(label) > max_len: + continue + + illegal_sample = False + for char_item in label.lower(): + if char_item not in config.CHARACTER: + illegal_sample = True + break + if illegal_sample: + continue + + filtered_index_list.append(index) + + img_ret = [] + text_ret = [] + + print(f'num of samples in IIIT dataset: {len(filtered_index_list)}') + + for index in filtered_index_list: + img, label = get_img_from_lmdb(env, index, is_adv=True) + + img_ret.append(img) + text_ret.append(label) + + if len(img_ret) == config.TEST_BATCH_SIZE: + img_ret = align_collector(img_ret) + text_ret, length = converter.encode(text_ret) + + label_indices = [] + for i, _ in enumerate(length): + for j in range(length[i]): + label_indices.append((i, j)) + label_indices = np.array(label_indices, np.int64) + sequence_length = np.array([26] * config.TEST_BATCH_SIZE, dtype=np.int32) + text_ret = text_ret.astype(np.int32) + + yield img_ret, label_indices, text_ret, sequence_length, length + + img_ret = [] + text_ret = [] diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py 
b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py
new file mode 100644
index 0000000..8ac9eac
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/lr_schedule.py
@@ -0,0 +1,41 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""lr generator for cnnctc"""
+import math
+
+def linear_warmup_learning_rate(current_step, warmup_steps, base_lr, init_lr):
+    """Linearly ramp the lr from init_lr to base_lr over warmup_steps."""
+    lr_inc = (float(base_lr) - float(init_lr)) / float(warmup_steps)
+    learning_rate = float(init_lr) + lr_inc * current_step
+    return learning_rate
+
+def a_cosine_learning_rate(current_step, base_lr, warmup_steps, decay_steps):
+    """Cosine decay from base_lr towards 0 over decay_steps after warmup."""
+    base = float(current_step - warmup_steps) / float(decay_steps)
+    learning_rate = (1 + math.cos(base * math.pi)) / 2 * base_lr
+    return learning_rate
+
+def dynamic_lr(config, steps_per_epoch):
+    """dynamic learning rate generator"""
+    base_lr = config.base_lr
+    total_steps = steps_per_epoch * config.TRAIN_EPOCHS
+    warmup_steps = int(config.warmup_step)
+    decay_steps = total_steps - warmup_steps
+    lr = []
+    for i in range(total_steps):
+        if i < warmup_steps:
+            lr.append(linear_warmup_learning_rate(i, warmup_steps, base_lr, base_lr * config.warmup_ratio))
+        else:
+            lr.append(a_cosine_learning_rate(i, base_lr, warmup_steps, decay_steps))
+
+    return lr
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/__init__.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py
new file mode 100644
index 0000000..cc3a81b
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/config.py
@@ -0,0 +1,131 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Parse arguments"""
+import os
+import ast
+import argparse
+from pprint import pprint, pformat
+import yaml
+
+
+_config_path = '../../../default_config.yaml'
+
+
+class Config:
+    """
+    Configuration namespace.
Convert dictionary to members + """ + def __init__(self, cfg_dict): + for k, v in cfg_dict.items(): + if isinstance(v, (list, tuple)): + setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v]) + else: + setattr(self, k, Config(v) if isinstance(v, dict) else v) + + def __str__(self): + return pformat(self.__dict__) + + def __repr__(self): + return self.__str__() + + +def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path='default_config.yaml'): + """ + Parse command line arguments to the configuration according to the default yaml + + Args: + parser: Parent parser + cfg: Base configuration + helper: Helper description + cfg_path: Path to the default yaml config + """ + parser = argparse.ArgumentParser(description='[REPLACE THIS at config.py]', + parents=[parser]) + helper = {} if helper is None else helper + choices = {} if choices is None else choices + for item in cfg: + if not isinstance(cfg[item], list) and not isinstance(cfg[item], dict): + help_description = helper[item] if item in helper else 'Please reference to {}'.format(cfg_path) + choice = choices[item] if item in choices else None + if isinstance(cfg[item], bool): + parser.add_argument('--' + item, type=ast.literal_eval, default=cfg[item], choices=choice, + help=help_description) + else: + parser.add_argument('--' + item, type=type(cfg[item]), default=cfg[item], choices=choice, + help=help_description) + args = parser.parse_args() + return args + + +def parse_yaml(yaml_path): + """ + Parse the yaml config file + + Args: + yaml_path: Path to the yaml config + """ + with open(yaml_path, 'r') as fin: + try: + cfgs = yaml.load_all(fin.read(), Loader=yaml.FullLoader) + cfgs = [x for x in cfgs] + if len(cfgs) == 1: + cfg_helper = {} + cfg = cfgs[0] + cfg_choices = {} + elif len(cfgs) == 2: + cfg, cfg_helper = cfgs + cfg_choices = {} + elif len(cfgs) == 3: + cfg, cfg_helper, cfg_choices = cfgs + else: + raise ValueError('At most 3 docs (config description for help, choices) are supported in config yaml') + print(cfg_helper) + except: + raise ValueError('Failed to parse yaml') + return cfg, cfg_helper, cfg_choices + + +def merge(args, cfg): + """ + Merge the base config from yaml file and command line arguments + + Args: + args: command line arguments + cfg: Base configuration + """ + args_var = vars(args) + for item in args_var: + cfg[item] = args_var[item] + return cfg + + +def get_config(): + """ + Get Config according to the yaml file and cli arguments + """ + parser = argparse.ArgumentParser(description='default name', add_help=False) + current_dir = os.path.dirname(os.path.abspath(__file__)) + parser.add_argument('--config_path', type=str, default=os.path.join(current_dir, _config_path), + help='Config file path') + path_args, _ = parser.parse_known_args() + default, helper, choices = parse_yaml(path_args.config_path) + args = parse_cli_to_yaml(parser=parser, cfg=default, helper=helper, choices=choices, cfg_path=path_args.config_path) + final_config = merge(args, default) + pprint(final_config) + print("Please check the above information for the configurations", flush=True) + return Config(final_config) + +config = get_config() diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py new file mode 100644 index 0000000..ad8415a --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/device_adapter.py @@ -0,0 +1,26 @@ +# Copyright 2021 Huawei 
Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Device adapter for ModelArts"""
+
+from .config import config
+
+if config.enable_modelarts:
+    from .moxing_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
+else:
+    from .local_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
+
+__all__ = [
+    'get_device_id', 'get_device_num', 'get_job_id', 'get_rank_id'
+]
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py
new file mode 100644
index 0000000..4ff88c4
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/local_adapter.py
@@ -0,0 +1,36 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================================
+
+"""Local adapter"""
+
+import os
+
+def get_device_id():
+    device_id = os.getenv('DEVICE_ID', '0')
+    return int(device_id)
+
+
+def get_device_num():
+    device_num = os.getenv('RANK_SIZE', '1')
+    return int(device_num)
+
+
+def get_rank_id():
+    global_rank_id = os.getenv('RANK_ID', '0')
+    return int(global_rank_id)
+
+
+def get_job_id():
+    return 'Local Job'
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py
new file mode 100644
index 0000000..c2d2282
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/model_utils/moxing_adapter.py
@@ -0,0 +1,124 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
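For reference, parse_yaml in src/model_utils/config.py accepts one to three YAML documents in a single file: the configuration itself, an optional per-key help map, and an optional per-key choices map. A minimal sketch of that layout with made-up keys (the real file is default_config.yaml):

```python
# Three YAML documents in one string: config, helper text, choices.
import yaml

text = """
device_target: Ascend
TRAIN_BATCH_SIZE: 192
---
device_target: "target device to run"
TRAIN_BATCH_SIZE: "batch size for training"
---
device_target: ["Ascend", "GPU"]
"""
cfg, helper, choices = list(yaml.load_all(text, Loader=yaml.FullLoader))
print(cfg["TRAIN_BATCH_SIZE"], choices["device_target"])
```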
+# ====================================================================================
+
+"""Moxing adapter for ModelArts"""
+
+import os
+import functools
+from mindspore import context
+from .config import config
+
+
+_global_syn_count = 0
+
+
+def get_device_id():
+    device_id = os.getenv('DEVICE_ID', '0')
+    return int(device_id)
+
+
+def get_device_num():
+    device_num = os.getenv('RANK_SIZE', '1')
+    return int(device_num)
+
+
+def get_rank_id():
+    global_rank_id = os.getenv('RANK_ID', '0')
+    return int(global_rank_id)
+
+
+def get_job_id():
+    job_id = os.getenv('JOB_ID')
+    job_id = job_id if job_id else "default"  # covers both unset and empty
+    return job_id
+
+
+def sync_data(from_path, to_path):
+    """
+    Download data from remote OBS to a local directory if the first path is a remote URL
+    and the second one is local; upload from a local directory to remote OBS otherwise.
+    """
+    import moxing as mox
+    import time
+    global _global_syn_count
+    sync_lock = '/tmp/copy_sync.lock' + str(_global_syn_count)
+    _global_syn_count += 1
+
+    # Each server contains at most 8 devices
+    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+        print('from path: ', from_path)
+        print('to path: ', to_path)
+        mox.file.copy_parallel(from_path, to_path)
+        print('===finished data synchronization===')
+        try:
+            os.mknod(sync_lock)
+        except IOError:
+            pass
+        print('===save flag===')
+
+    while True:
+        if os.path.exists(sync_lock):
+            break
+        time.sleep(1)
+    print('Finish sync data from {} to {}'.format(from_path, to_path))
+
+
+def moxing_wrapper(pre_process=None, post_process=None):
+    """
+    Moxing wrapper to download dataset and upload outputs
+    """
+    def wrapper(run_func):
+        @functools.wraps(run_func)
+        def wrapped_func(*args, **kwargs):
+            # Download data from data_url
+            if config.enable_modelarts:
+                if config.data_url:
+                    sync_data(config.data_url, config.data_path)
+                    print('Dataset downloaded: ', os.listdir(config.data_path))
+                if config.checkpoint_url:
+                    if not os.path.exists(config.load_path):
+                        os.makedirs(config.load_path)
+                        print('=' * 20 + 'makedirs')
+                        if os.path.isdir(config.load_path):
+                            print('=' * 20 + 'makedirs success')
+                        else:
+                            print('=' * 20 + 'makedirs fail')
+                    sync_data(config.checkpoint_url, config.load_path)
+                    print('Preload downloaded: ', os.listdir(config.load_path))
+                if config.train_url:
+                    sync_data(config.train_url, config.output_path)
+                    print('Workspace downloaded: ', os.listdir(config.output_path))
+
+                context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id())))
+                config.device_num = get_device_num()
+                config.device_id = get_device_id()
+                if not os.path.exists(config.output_path):
+                    os.makedirs(config.output_path)
+
+            if pre_process:
+                pre_process()
+
+            run_func(*args, **kwargs)
+
+            # Upload data to train_url
+            if config.enable_modelarts:
+                if post_process:
+                    post_process()
+
+                if config.train_url:
+                    print('Start to copy output directory')
+                    sync_data(config.output_path, config.train_url)
+        return wrapped_func
+    return wrapper
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py
new file mode 100644
index 0000000..c1a190b
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/preprocess_dataset.py
@@ -0,0 +1,172 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
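The contract of moxing_wrapper is easier to see without the ModelArts plumbing: sync inputs down, run the optional pre_process hook, call the wrapped entry point, then run post_process and sync outputs up. A moxing-free sketch of that ordering (prints stand in for sync_data):

```python
# Simplified decorator mirroring moxing_wrapper's hook ordering.
import functools

def moxing_wrapper_demo(pre_process=None, post_process=None):
    def wrapper(run_func):
        @functools.wraps(run_func)
        def wrapped(*args, **kwargs):
            print("download inputs")       # stands in for sync_data(data_url, ...)
            if pre_process:
                pre_process()
            run_func(*args, **kwargs)
            if post_process:
                post_process()
            print("upload outputs")        # stands in for sync_data(output_path, ...)
        return wrapped
    return wrapper

@moxing_wrapper_demo(pre_process=lambda: print("pre"))
def train():
    print("training")

train()   # download inputs / pre / training / upload outputs
```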
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess dataset"""
+
+import random
+import pickle
+import numpy as np
+import lmdb
+from tqdm import tqdm
+
+def combine_lmdbs(lmdb_paths, lmdb_save_path):
+    """combine lmdb dataset"""
+    max_len = int((26 + 1) // 2)
+    character = '0123456789abcdefghijklmnopqrstuvwxyz'
+
+    env_save = lmdb.open(
+        lmdb_save_path,
+        map_size=1099511627776)
+
+    cnt = 0
+    for lmdb_path in lmdb_paths:
+        env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+        with env.begin(write=False) as txn:
+            n_samples = int(txn.get('num-samples'.encode()))
+
+            # Filtering
+            for index in tqdm(range(n_samples)):
+                index += 1  # lmdb starts with 1
+                label_key = 'label-%09d'.encode() % index
+                label = txn.get(label_key).decode('utf-8')
+
+                if len(label) > max_len:
+                    continue
+
+                illegal_sample = False
+                for char_item in label.lower():
+                    if char_item not in character:
+                        illegal_sample = True
+                        break
+                if illegal_sample:
+                    continue
+
+                img_key = 'image-%09d'.encode() % index
+                imgbuf = txn.get(img_key)
+
+                with env_save.begin(write=True) as txn_save:
+                    cnt += 1
+
+                    label_key_save = 'label-%09d'.encode() % cnt
+                    label_save = label.encode()
+                    image_key_save = 'image-%09d'.encode() % cnt
+                    image_save = imgbuf
+
+                    txn_save.put(label_key_save, label_save)
+                    txn_save.put(image_key_save, image_save)
+
+    n_samples = cnt
+    with env_save.begin(write=True) as txn_save:
+        txn_save.put('num-samples'.encode(), str(n_samples).encode())
+
+
+def analyze_lmdb_label_length(lmdb_path, batch_size=192, num_of_combinations=1000):
+    """analyze lmdb label"""
+    label_length_dict = {}
+
+    env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+    with env.begin(write=False) as txn:
+        n_samples = int(txn.get('num-samples'.encode()))
+
+        for index in tqdm(range(n_samples)):
+            index += 1  # lmdb starts with 1
+            label_key = 'label-%09d'.encode() % index
+            label = txn.get(label_key).decode('utf-8')
+
+            label_length = len(label)
+            if label_length in label_length_dict:
+                label_length_dict[label_length] += 1
+            else:
+                label_length_dict[label_length] = 1
+
+    sorted_label_length = sorted(label_length_dict.items(), key=lambda x: x[1], reverse=True)
+
+    label_length_sum = 0
+    label_num = 0
+    lengths = []
+    p = []
+    for l, num in sorted_label_length:
+        label_length_sum += l * num
+        label_num += num
+        p.append(num)
+        lengths.append(l)
+    for i, _ in enumerate(p):
+        p[i] /= label_num
+
+    average_overall_length = int(label_length_sum / label_num * batch_size)
+
+    def get_combinations_of_fix_length(fix_length, items, p, batch_size):
+        ret = np.random.choice(items, batch_size - 1, True, p)
+        cur_sum = sum(ret)
+        ret = list(ret)
+        if fix_length - cur_sum in items:
+            ret.append(fix_length - cur_sum)
+        else:
+            return None
+        return ret
+
+    result = []
+    while len(result) < num_of_combinations:
+        ret = get_combinations_of_fix_length(average_overall_length, lengths, p, batch_size)
+        if ret is not None:
+            result.append(ret)
+    return result
+
+
+def
generate_fix_shape_index_list(lmdb_path, combinations, pkl_save_path, num_of_iters=70000): + """generate fix shape index list""" + length_index_dict = {} + + env = lmdb.open(lmdb_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + n_samples = n_samples + + for index in tqdm(range(n_samples)): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + + label_length = len(label) + if label_length in length_index_dict: + length_index_dict[label_length].append(index) + else: + length_index_dict[label_length] = [index] + + ret = [] + for _ in range(num_of_iters): + comb = random.choice(combinations) + for l in comb: + ret.append(random.choice(length_index_dict[l])) + + with open(pkl_save_path, 'wb') as f: + pickle.dump(ret, f, -1) + + +if __name__ == '__main__': + # step 1: combine the SynthText dataset and MJSynth dataset into a single lmdb file + print('Begin to combine multiple lmdb datasets') + combine_lmdbs(['/home/workspace/mindspore_dataset/CNNCTC_Data/1_ST/', + '/home/workspace/mindspore_dataset/CNNCTC_Data/MJ_train/'], + '/home/workspace/mindspore_dataset/CNNCTC_Data/ST_MJ') + + # step 2: generate the order of input data, guarantee that the input batch shape is fixed + print('Begin to generate the index order of input data') + combination = analyze_lmdb_label_length('/home/workspace/mindspore_dataset/CNNCTC_Data/ST_MJ') + generate_fix_shape_index_list('/home/workspace/mindspore_dataset/CNNCTC_Data/ST_MJ', combination, + '/home/workspace/mindspore_dataset/CNNCTC_Data/st_mj_fixed_length_index_list.pkl') + + print('Done') diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py new file mode 100644 index 0000000..ac19fe6 --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/src/util.py @@ -0,0 +1,102 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""util file""" + +import numpy as np + +class AverageMeter(): + """Computes and stores the average and current value""" + + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +class CTCLabelConverter(): + """ Convert between text-label and text-index """ + + def __init__(self, character): + # character (str): set of the possible characters. 
diff --git a/examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py b/examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py
new file mode 100644
index 0000000..87e6eeb
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/cnn_ctc/train.py
@@ -0,0 +1,148 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""cnnctc train"""
+
+
+import numpy as np
+import mindspore
+import mindspore.common.dtype as mstype
+from mindspore import context
+from mindspore import Tensor
+from mindspore.common import set_seed
+from mindspore.communication.management import init, get_rank, get_group_size
+from mindspore.dataset import GeneratorDataset
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
+from mindspore.train.model import Model
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from src.callback import LossCallBack
+from src.cnn_ctc import CNNCTC, CTCLoss, WithLossCell, CNNCTCTrainOneStepWithLossScaleCell
+from src.dataset import STMJGeneratorBatchFixedLength, STMJGeneratorBatchFixedLengthPara
+from src.lr_schedule import dynamic_lr
+from src.model_utils.config import config
+from src.model_utils.device_adapter import get_device_id
+from src.model_utils.moxing_adapter import moxing_wrapper
+
+
+set_seed(1)
+
+
+context.set_context(mode=context.GRAPH_MODE, save_graphs=False, save_graphs_path=".")
+
+
+def dataset_creator(run_distribute):
+    """create the train dataset from the fixed-batch-shape ST-MJ generator"""
+    if run_distribute:
+        st_dataset = STMJGeneratorBatchFixedLengthPara()
+    else:
+        st_dataset = STMJGeneratorBatchFixedLength()
+
+    ds = GeneratorDataset(st_dataset,
+                          ['img', 'label_indices', 'text', 'sequence_length'],
+                          num_parallel_workers=8)
+
+    return ds
+
+
+def modelarts_pre_process():
+    pass
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def train():
+    """train cnnctc model"""
+    target = config.device_target
+    context.set_context(device_target=target)
+
+    if target == "Ascend":
+        device_id = get_device_id()
+        context.set_context(device_id=device_id)
+
+        if config.run_distribute:
+            init()
+            context.set_auto_parallel_context(parallel_mode="data_parallel")
+
+        ckpt_save_dir = config.SAVE_PATH
+    else:
+        # GPU target
+        device_id = get_device_id()
+        context.set_context(device_id=device_id)
+        if config.run_distribute:
+            init()
+            context.set_auto_parallel_context(device_num=get_group_size(),
+                                              parallel_mode="data_parallel",
+                                              gradients_mean=False,
+                                              gradient_fp32_sync=False)
+
+            ckpt_save_dir = config.SAVE_PATH + "ckpt_" + str(get_rank()) + "/"
+            print(ckpt_save_dir)
+        else:
+            ckpt_save_dir = config.SAVE_PATH + "ckpt_standalone/"
+
+    ds = dataset_creator(config.run_distribute)
+
+    net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+    net.set_train(True)
+
+    if config.PRED_TRAINED:
+        param_dict = load_checkpoint(config.PRED_TRAINED)
+        load_param_into_net(net, param_dict)
+        print('parameters loaded!')
+    else:
+        print('train from scratch...')
+
+    criterion = CTCLoss()
+    dataset_size = ds.get_dataset_size()
+    lr = Tensor(dynamic_lr(config, dataset_size), mstype.float32)
+    opt = mindspore.nn.RMSProp(params=net.trainable_params(),
+                               centered=True,
+                               learning_rate=lr,
+                               momentum=config.MOMENTUM,
+                               loss_scale=config.LOSS_SCALE)
+
+    net = WithLossCell(net, criterion)
+
+    if target == "Ascend":
+        # Ascend: rely on Model's built-in loss scaling with O2 mixed precision
+        loss_scale_manager = mindspore.train.loss_scale_manager.FixedLossScaleManager(
+            config.LOSS_SCALE, False)
+        net.set_train(True)
+        model = Model(net, optimizer=opt, loss_scale_manager=loss_scale_manager, amp_level="O2")
+    else:
+        # GPU: use the custom train-one-step cell with a fixed scaling sensitivity
+        scaling_sens = Tensor(np.full((1), config.LOSS_SCALE), dtype=mstype.float32)
+        net = CNNCTCTrainOneStepWithLossScaleCell(net, opt, scaling_sens)
+        net.set_train(True)
+        model = Model(net)
+
+    callback = LossCallBack()
+    config_ck = CheckpointConfig(save_checkpoint_steps=config.SAVE_CKPT_PER_N_STEP,
+                                 keep_checkpoint_max=config.KEEP_CKPT_MAX_NUM)
+    ckpoint_cb = ModelCheckpoint(prefix="CNNCTC", config=config_ck, directory=ckpt_save_dir)
+
+    if config.run_distribute:
+        # only device 0 saves checkpoints in the distributed case
+        if device_id == 0:
+            model.train(config.TRAIN_EPOCHS,
+                        ds,
+                        callbacks=[callback, ckpoint_cb],
+                        dataset_sink_mode=False)
+        else:
+            model.train(config.TRAIN_EPOCHS, ds, callbacks=[callback], dataset_sink_mode=False)
+    else:
+        model.train(config.TRAIN_EPOCHS,
+                    ds,
+                    callbacks=[callback, ckpoint_cb],
+                    dataset_sink_mode=False)
+
+
+if __name__ == '__main__':
+    train()
diff --git a/examples/natural_robustness/ocr_evaluate/default_config.yaml b/examples/natural_robustness/ocr_evaluate/default_config.yaml
new file mode 100644
index 0000000..63f94db
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/default_config.yaml
@@ -0,0 +1,76 @@
+# Builtin Configurations (DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
+enable_modelarts: False
+# url for modelarts
+data_url: ""
+train_url: ""
+checkpoint_url: ""
+# path for local
+data_path: "/cache/data"
+output_path: "/cache/train"
+load_path: "/cache/checkpoint_path"
+device_target: "GPU"
+enable_profiling: False
+
+# ======================================================================================
+# Training options
+CHARACTER: "0123456789abcdefghijklmnopqrstuvwxyz"
+
+# NUM_CLASS = len(CHARACTER) + 1
+NUM_CLASS: 37
+
+HIDDEN_SIZE: 512
+FINAL_FEATURE_WIDTH: 26
+
+# dataset config
+IMG_H: 32
+IMG_W: 100
+TRAIN_DATASET_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/ST_MJ/"
+TRAIN_DATASET_INDEX_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/st_mj_fixed_length_index_list.pkl"
+TRAIN_BATCH_SIZE: 192
+TRAIN_EPOCHS: 3
+
+# training config
+run_distribute: False
+PRED_TRAINED: ""
+SAVE_PATH: "./"
+# LR
+base_lr: 0.0005
+warmup_step: 2000
+warmup_ratio: 0.0625
+MOMENTUM: 0.8
+LOSS_SCALE: 8096
+SAVE_CKPT_PER_N_STEP: 2000
+KEEP_CKPT_MAX_NUM: 5
+
+# ======================================================================================
+# Eval options
+TEST_DATASET_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/IIIT5k_3000"
+#TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+TEST_BATCH_SIZE: 256
+CHECKPOINT_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/cnn_ctc/ckpt_standalone/CNNCTC-3_70000.ckpt"
+ADV_TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+IS_ADV: False
+
+# export options
+device_id: 0
+file_name: "cnnctc"
+file_format: "MINDIR"
+ckpt_file: ""
+
+# 310 infer
+result_path: ""
+label_path: ""
+preprocess_output: ""
+
+---
+# Help description for each configuration
+enable_modelarts: "Whether training on modelarts, default: False"
+data_url: "Url for modelarts"
+train_url: "Url for modelarts"
+data_path: "The location of input data"
+output_path: "The location of the output file"
+device_target: "Device target: GPU or Ascend"
+enable_profiling: "Whether to enable profiling while training, default: False"
+file_name: "CNN&CTC output air name"
+file_format: "choices [AIR, MINDIR]"
+ckpt_file: "CNN&CTC ckpt file"
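A note on how these options are consumed (editor's sketch, not part of the patch): src/model_utils/config.py loads default_config.yaml into an attribute-style `config` object, the same `config` imported by train.py above and eval_and_save.py below, so each key is read as a plain attribute:

    from cnn_ctc.src.model_utils.config import config

    assert config.NUM_CLASS == len(config.CHARACTER) + 1  # 36 characters plus 1 CTC blank
    print(config.TEST_BATCH_SIZE, config.CHECKPOINT_PATH)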
diff --git a/examples/natural_robustness/ocr_evaluate/eval_and_save.py b/examples/natural_robustness/ocr_evaluate/eval_and_save.py
new file mode 100644
index 0000000..a015a46
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/eval_and_save.py
@@ -0,0 +1,100 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""cnnctc eval"""
+
+import numpy as np
+import lmdb
+from mindspore import Tensor, context
+import mindspore.common.dtype as mstype
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.dataset import GeneratorDataset
+from cnn_ctc.src.util import CTCLabelConverter
+from cnn_ctc.src.dataset import iiit_generator_batch, adv_iiit_generator_batch
+from cnn_ctc.src.cnn_ctc import CNNCTC
+from cnn_ctc.src.model_utils.config import config
+
+context.set_context(mode=context.GRAPH_MODE, save_graphs=False,
+                    save_graphs_path=".")
+
+
+def test_dataset_creator(is_adv=False):
+    """create the IIIT5K test dataset (perturbed samples when is_adv is True)"""
+    if is_adv:
+        ds = GeneratorDataset(adv_iiit_generator_batch(), ['img', 'label_indices', 'text',
+                                                           'sequence_length', 'label_str'])
+    else:
+        ds = GeneratorDataset(iiit_generator_batch, ['img', 'label_indices', 'text',
+                                                     'sequence_length', 'label_str'])
+    return ds
+
+
+def test(lmdb_save_path):
+    """Evaluate the cnnctc model on benign or perturbed data and save predictions."""
+    target = config.device_target
+    context.set_context(device_target=target)
+
+    ds = test_dataset_creator(is_adv=config.IS_ADV)
+    net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+
+    ckpt_path = config.CHECKPOINT_PATH
+    param_dict = load_checkpoint(ckpt_path)
+    load_param_into_net(net, param_dict)
+    print('parameters loaded! from: ', ckpt_path)
+
+    converter = CTCLabelConverter(config.CHARACTER)
+
+    count = 0
+    correct_count = 0
+    env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+    with env_save.begin(write=True) as txn_save:
+        for data in ds.create_tuple_iterator():
+            img, _, text, _, length = data
+
+            img_tensor = Tensor(img, mstype.float32)
+
+            model_predict = net(img_tensor)
+            model_predict = np.squeeze(model_predict.asnumpy())
+
+            preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE)
+            preds_index = np.argmax(model_predict, 2)
+            preds_index = np.reshape(preds_index, [-1])
+            preds_str = converter.decode(preds_index, preds_size)
+            label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy())
+
+            print("Prediction samples: \n", preds_str[:5])
+            print("Ground truth: \n", label_str[:5])
+            for pred, label in zip(preds_str, label_str):
+                if pred == label:
+                    correct_count += 1
+                count += 1
+                if config.IS_ADV:
+                    pred_key = 'adv_pred-%09d'.encode() % count
+                else:
+                    pred_key = 'pred-%09d'.encode() % count
+
+                txn_save.put(pred_key, pred.encode())
+    accuracy = correct_count / count
+    return accuracy
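+
+
+# Editor's sketch (not part of the original script): after both passes in the
+# __main__ block below have stored 'pred-%09d' and 'adv_pred-%09d' keys, the
+# saved lmdb can be mined for robustness failures. This assumes the dataset
+# iteration order matches the lmdb index order and compares case-insensitively
+# (the model predicts over a lowercase character set).
+def robustness_failures(lmdb_save_path, num_samples):
+    """Return indices whose benign prediction is correct but whose perturbed one is not."""
+    env = lmdb.open(lmdb_save_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+    failures = []
+    with env.begin(write=False) as txn:
+        for i in range(1, num_samples + 1):
+            pred = txn.get('pred-%09d'.encode() % i)
+            adv_pred = txn.get('adv_pred-%09d'.encode() % i)
+            label = txn.get('label-%09d'.encode() % i)
+            if pred and adv_pred and label:
+                if pred.lower() == label.lower() and adv_pred.lower() != label.lower():
+                    failures.append(i)
+    return failures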
+
+
+if __name__ == '__main__':
+    # both passes read the perturbed dataset lmdb, which stores the original
+    # images alongside their perturbed versions
+    save_path = config.ADV_TEST_DATASET_PATH
+    config.IS_ADV = False
+    config.TEST_DATASET_PATH = save_path
+    ori_acc = test(lmdb_save_path=save_path)
+
+    config.IS_ADV = True
+    adv_acc = test(lmdb_save_path=save_path)
+    print('Accuracy of benign sample: ', ori_acc)
+    print('Accuracy of perturbed sample: ', adv_acc)
diff --git a/examples/natural_robustness/ocr_evaluate/generate_adv_samples.py b/examples/natural_robustness/ocr_evaluate/generate_adv_samples.py
new file mode 100644
index 0000000..62b87df
--- /dev/null
+++ b/examples/natural_robustness/ocr_evaluate/generate_adv_samples.py
@@ -0,0 +1,139 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Generate natural robustness samples."""
""" + +import sys +import json +import time +import lmdb +from mindspore_serving.client import Client +from cnn_ctc.src.model_utils.config import config + +config_perturb = [ + {"method": "Contrast", "params": {"alpha": 1.5, "beta": 0}}, + {"method": "GaussianBlur", "params": {"ksize": 5}}, + {"method": "SaltAndPepperNoise", "params": {"factor": 0.05}}, + {"method": "Translate", "params": {"x_bias": 0.1, "y_bias": -0.1}}, + {"method": "Scale", "params": {"factor_x": 0.8, "factor_y": 0.8}}, + {"method": "Shear", "params": {"factor": 1.5, "direction": "horizontal"}}, + {"method": "Rotate", "params": {"angle": 30}}, + {"method": "MotionBlur", "params": {"degree": 5, "angle": 45}}, + {"method": "GradientBlur", "params": {"point": [50, 100], "kernel_num": 3, "center": True}}, + {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], + "start_point": [100, 150], "scope": 0.3, + "bright_rate": 0.3, "pattern": "light", "mode": "circle"}}, + {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], + "color_end": [0, 0, 0], "start_point": [150, 200], + "scope": 0.3, "pattern": "light", "mode": "horizontal"}}, + {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], + "start_point": [150, 200], "scope": 0.3, + "pattern": "light", "mode": "vertical"}}, + {"method": "Curve", "params": {"curves": 0.5, "depth": 3, "mode": "vertical"}}, + {"method": "Perspective", "params": {"ori_pos": [[0, 0], [0, 800], [800, 0], [800, 800]], + "dst_pos": [[10, 0], [0, 800], [790, 0], [800, 800]]}}, +] + + +def generate_adv_iii5t_3000(lmdb_paths, lmdb_save_path, perturb_config): + """generate perturb iii5t_3000""" + max_len = int((26 + 1) // 2) + + instances = [] + methods_number = 1 + outputs_number = 2 + perturb_config = json.dumps(perturb_config) + + env = lmdb.open(lmdb_paths, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) + + if not env: + print('cannot create lmdb from %s' % (lmdb_paths)) + sys.exit(0) + with env.begin(write=False) as txn: + n_samples = int(txn.get('num-samples'.encode())) + + # Filtering + filtered_labels = [] + filtered_index_list = [] + for index in range(n_samples): + index += 1 # lmdb starts with 1 + label_key = 'label-%09d'.encode() % index + label = txn.get(label_key).decode('utf-8') + + if len(label) > max_len: continue + illegal_sample = False + for char_item in label.lower(): + if char_item not in config.CHARACTER: + illegal_sample = True + break + if illegal_sample: continue + + filtered_labels.append(label) + filtered_index_list.append(index) + img_key = 'image-%09d'.encode() % index + imgbuf = txn.get(img_key) + instances.append({"img": imgbuf, 'perturb_config': perturb_config, "methods_number": methods_number, + "outputs_number": outputs_number}) + + print(f'num of samples in IIIT dataset: {len(filtered_index_list)}') + + client = Client("10.113.216.54:5500", "perturbation", "natural_perturbation") + start_time = time.time() + result = client.infer(instances) + end_time = time.time() + print('generated natural perturbs images cost: ', end_time - start_time) + env_save = lmdb.open(lmdb_save_path, map_size=1099511627776) + + txn = env.begin(write=False) + with env_save.begin(write=True) as txn_save: + new_index = 1 + for i, index in enumerate(filtered_index_list): + try: + file_names = result[i]['file_names'].split(';') + except: + error_msg = result[i] + raise ValueError(error_msg) + + length = result[i]['file_length'].tolist() + before = 0 + label = 
+
+
+if __name__ == '__main__':
+    save_path_lmdb = config.ADV_TEST_DATASET_PATH
+    generate_adv_iii5t_3000(config.TEST_DATASET_PATH, save_path_lmdb, config_perturb)
diff --git a/examples/natural_robustness/ocr_evaluate/image/catalog.png b/examples/natural_robustness/ocr_evaluate/image/catalog.png
new file mode 100644
index 0000000000000000000000000000000000000000..af6fe3594c163834ff34d8f60b90f2df3b77fc23
GIT binary patch
literal 14419
[base85-encoded PNG data omitted]
diff --git a/examples/natural_robustness/ocr_evaluate/image/result_demo.png b/examples/natural_robustness/ocr_evaluate/image/result_demo.png
new file mode 100644
index 0000000000000000000000000000000000000000..64f28286d66fae64f57a34190cbfae29d1382f0d
GIT binary patch
literal 45458
[base85-encoded PNG data omitted]
z3C6$t3(yaTWh!(x(D9`Pa8;bG2jfIm{o_e_mE>Tu+T=YEC*Aw#c*oa4bIBs<{bnNqm2?IoHivmnrEE81@Ip@G&%nxTYxZD;$GzSudsb3xoATW|W8 z=O)PlH{9pMFzg1MLy%psXuL0QJuWE7r@#92fUb)7q-d}A$cnY;R23-j|6xM>L>f8* zZsWe(3Ysm5+=cHq!_>oW*2fHt$j!9J^nas@9&u|+%izu%mj`?^#5(yKN0+$2P=(#4 zd->#a@-!=4Z#Unt&rpTU>b7C%m|lJjeepG zxqOLdZGg61Aj%0z(NM70XYr}ZD_@1Y7}uqsr`(>1p#L&+<0)>H{q`7@;KU5E7heJJ2GSyFH;B{lmAiRd-KVAj$hAmz7G zugb+EE!?JM5f-7EvR8L1;Rz3jNY@M{l{h}`PUx`fjU82-LPh&*HoR)0WQe%4k-Q}W zNrR+*bZ}IQ8ZG?jui9S|K*JgOUwI7jUI8`9-XWbbMAwZArlu0R?0brVtO9cZXMMlN zUzQ)FvIF7FMZ=_iN3NUt1?47<8DXb48Zh|ilRZ95jh}=Pvt(W3+VV&Y-}KFte^SOh zR+Lu@Mvt|8+pX~IjGBTcHa?-o-&gY;=nEh)Rg%srnB}ZLEYGnSXgAwEU~=CNjcKqq zDGY7B>u3qQrOjgc{hRK&6n3$NEP^3@8Ji$hyD{%FHcqCk5 zpls)hj=enw-BuNu(sTk~K9vfAB}yC>#^}8H52WV@OuS0Gx&+;n9CrEPayTTfM|M79q#7%)v{Hakwm@$5$oqDCV8>JK)TZcJ`&5#n7g;bR$qG-PTln8 zPR#IS1*c(CqJ$S%uRqD6$KsIEMo-ZyN@gSZF<*gk?mvc}pc* z%STrg_u^+TDhp&oVZUsi08{4)o`Tv3C zt*?Qzc%AoV4=GXbs8s{=;38x!C7kr2dr-T3Lqjfi{}zZML|@*P(;kCNA@2UCHBBUsUObgf-E))jordt`ih^l$n0b0KS>2(+sK%ME#wm5I6Z$r$Ir3lHax%)* z>q1*EUDAIpxXw(~NuqJgy)_54Q@1@TV-Bl0;0;VTNx6i@TK2$=S;#i>9wyjUnfXPIJtX@KMBSyw@=e@MiV(gr)b2 zyPp3iRsc(~U)S9gICyyLLV{@d(MpZ-P*n^xPW>)$Zz(o+oYDl_+t|xU_IFLl%mdU+ zaC55y4I)wU;PBn|DQ{m~BZ37@`c|`0Q7Ico_E)=U70JkP`+V_UC;#%R9oer@w)q(x zAhlq%LZHzuCfKur(^>z0t?lTw^JjiDsx(W{K|EHVSQjl;%h_)I(m7oGutYy$$eu)* zn06~-i7bn&|MWTSV+!gaT)9kJbMhTBCHFBljf*FNV^K zJVK3QMld;NRtje_@mDM*c;5-4;jQc(!#mRqc3z74`cFY7;w$%n*ileigCGx#1L)VTPbI5=mQnLsB~FB6vz z8@Eu7FeVbxic^Rtn};-;Tu3>~) ztA8Uo>p#z+tDVx?86YCUZ6s&rrN)ViVq~pIsMk$R9sBVqlYp=-5&!!ikycEfa;3nm zk#7VAXJxsRuwJR^cyHz@+nyGfY5{;Ai9B1h39%}xKR(sXy6|2Y|Fr;NBc5t39N#M5 zs(DhA^SyGQNDc>}(zoz?i$dIO+GVJ|Do429>q&8dvh3E|g`vUBVv));IN_ ziT7Syn0B;PDlS|K3A7OnIBRh zp(P$jC4-4whsfay5FvOF?$w{@y6B@AQ+4B-*d8 ze}M|JYm^=`#PK4LBs2ktWD4uw@(fE)?qCmz5cvr8oQ>(A^mJ96M@qQpEHf1Rp-PJI zwJ5d{daO~7K8iji7O?d`6C@w>ZbHze&-_1?Kxr|s#l)`~?i-@qQ|9)M2t5yxlrr)X4J#EKGV#AH6_6jnd#9?_i#>xT%D zB`^*1XU{jp4Mq+Ay4RnhQKh6vTTkB^Z;*$fqj)gUEcxAUOsm4Ze2YgV0(yX#czGkRtl9WE|wUiXJEUaR;}2DC1Va>eOY4Q&*d8$UHawg;3~cpgMA z?Gp;nsNh+t{`yydp00cp2rG*Nwin!L>24thj5+3|1K^QTXg}SH$$pp5|4rKK%B9K5T!pD=r*|CZ z{Vtt!G|FZW#euhWQkQl)Ruez=WeVbIaUZ!kNUYPq1w{hZy_Nq`*ngM@ZB}@mEAMOKV^fltT)OwcSnJaM8`Pp+7Ap%qSO2=w#}pXx6CsU|)dnzi$Ze7ivJj29 zPw+EQHMB?qli_7D#614R!Oz~ zsife`g_`NxN3F2!?+ zQr1L{0oEQ!Bi(jC3Nt`=DrP>=dE3M5eJd~zr;DWfMLxjKKV2pS)V!^l2>P|gfSXRc zxnXgT&)K|_nWomn@4{t_gsph^)1tr{g-0~i@E&N%Z?6UKb(<8#q8;!BW@+tkQ?Tpw zgR!uF$C=vS;H}#8P>gQb=S_oW6!sKw3&5_5bMV(y{3xGsDc>Rj(yeZiH#~_>(-C>+ zqR{jPVl4H`W@nP_zP8)#y>)0OZMa$I|Bi&d&3|Iq*FSo5UH#w5Xgbtd%P6%G*!t|p zCu5EhBARvvfkB7p;aBipeQ_WH7Hu}*uShL?K5p>7xFXlD@!{got;#x7dBYBrMoq?O zFaIk^yw?kp4mp{GYrWc)km9nsJ%_V{#+wd@7r5~0scaiVE*iLxjHkQjAr{#IF=5wo zRsXpQuE8u-JQb*e6z{L4A7-4)xv6fov2x_~LuS0!-NVe@I<{0v0~_^e;FaEkcLUFpcPW#}93gj0 zV_9ykHy10$)9`QEaGC+y1n=P-o?YAZE?()uvCfCEf6LwvBYaRxetgfowXgAT8Zj(1 z(M!%FqfRNO3UC%Cn>J}gA~MT&cX0oX!9@6<1bz)9a7H+u{b&m0zfy}U7rQ;yXLemi zoF{R2DK&&gG_8o!3sTN-CAAmKk)wFv_?&7E>Up-6mX_b$@zd6gkSp&? zGe+P3Z=44?!=GQGeVOw<*TMB?@0NfiOR_ip+h@b{=zsD~*fb>`4YZ)%hQ`*1MRYz_%SfI~tAsMWrps}X z%WSKaKQvRmSe&uus19t7E%U-frT_r+*F>~1W! z11#w8bOeO>l#J{$3SK;|L{SB${7z#)FgQ&5`!k;s3H&1fNA=+R4TlvN`Q-mr(UYE! 
z{1Zxn%LKFHWq+H*|B+<5t$$|zzEzKJO3?^Kxp8BV&c5*Ih0O>9aK4EryI)WDCzL}) zOrLjwJo(RcM&s_3A;V-O&-;|g#@1HqeCR;nk6bA#i-BaPE#l24Lw;|}qt~9Yru@FT zEUjpW!g%x*q%GU|o*5+X;`Trp`fo00px6UFNKauz*W)9ZXCVQ9FBD%ZRLYqL{|}@n B-~0do literal 0 HcmV?d00001 diff --git a/examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md b/examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md new file mode 100644 index 0000000..97219ff --- /dev/null +++ b/examples/natural_robustness/ocr_evaluate/对OCR模型CNN-CTC的鲁棒性评测.md @@ -0,0 +1,508 @@ +# 对OCR模型CNN-CTC的鲁棒性评测 + +## 概述 + +本教程主要演示利用自然扰动serving服务,对OCR模型CNN-CTC做一个简单的鲁棒性评测。先基于serving生成多种自然扰动样本数据集,然后根据CNN-CTC模型在自然扰动样本数据集上的表现来评估模型的鲁棒性。 + +## 环境要求 + +- 硬件 + + - Ascend或GPU处理器搭建硬件环境。 + +- 依赖 + + - [MindSpore](https://www.mindspore.cn/install) + - indSpore-Serving=1.6.0 + - MindArmour + +## 脚本说明 + +### 代码结构 + +```bash +|-- natural_robustness + |-- serving # 提供自然扰动样本生成的serving服务 + |-- ocr_evaluate + |-- cnn_ctc # cnn_ctc模型相关:模型的训练、推理、前后处理 + |-- data # 存储实验分析数据 + |-- default_config.yaml # 参数配置 + |-- generate_adv_samples.py # 用于生成自然扰动样本 + |-- eval_and_save.py # cnn_ctc在扰动样本上推理,并保存推理结果 + |-- analyse.py # 分析cnn_ctc模型的鲁棒性 +``` + +### 脚本参数 + +在`default_config.yaml`中可以同时配置训练参数、推理参数、鲁棒性评测参数。这里我们重点关注在评测过程中使用到的参数,以及需要用户配置的参数,其余参数说明参考[CNN-CTC教程](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc)。 + +训练参数: + +- `--TRAIN_DATASET_PATH`:训练数据集的路径。 +- `--TRAIN_DATASET_INDEX_PATH`:决定顺序的训练数据集索引文件的路径。。 +- `--SAVE_PATH`:模型检查点文件保存路径。 + +推理和评估参数: + +- `--TEST_DATASET_PATH`:测试数据集路径 +- `--CHECKPOINT_PATH`:checkpoint路径 +- `--ADV_TEST_DATASET_PATH`:扰动样本数据集路径 +- `--IS_ADV`:是否使用扰动样本进行测试 + +### 模型与数据 + +数据处理与模型训练参考[CNN-CTC教程](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc)。评测任务需基于该教程获得预处理后的数据集和checkpoint模型文件。 + +#### 模型 + +被评测的模型为基于MindSpore实现的OCR模型CNN-CTC,改模型主要针对场景文字识别(Scene Text Recognition)任务,用CNN模型提取特征,用CTC(Connectionist temporal classification)预测输出序列。具体说明和实现参考[CNN-CTC](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc)。 + +[论文](https://arxiv.org/abs/1904.01906): J. Baek, G. Kim, J. Lee, S. Park, D. Han, S. Yun, S. J. Oh, and H. Lee, “What is wrong with scene text recognition model comparisons? dataset and model analysis,” ArXiv, vol. abs/1904.01906, 2019. + +#### 数据集 + +训练数据集:[MJSynth](https://www.robots.ox.ac.uk/~vgg/data/text/)和[SynthText](https://github.com/ankush-me/SynthText) + +测试数据集:[The IIIT 5K-word dataset](https://cvit.iiit.ac.in/research/projects/cvit-projects/the-iiit-5k-word-dataset) + +##### 数据集处理: + +- 步骤1: + + 所有数据集均经过预处理,以.lmdb格式存储,点击[**此处**](https://gitee.com/link?target=https%3A%2F%2Fdrive.google.com%2Fdrive%2Ffolders%2F192UfE9agQUMNq6AgU3_E05_FcPZK4hyt)可下载。 + +- 步骤2: + + 解压下载的文件,重命名MJSynth数据集为MJ,SynthText数据集为ST,IIIT数据集为IIIT。 + +- 步骤3: + + 将上述三个数据集移至`cnctc_data`文件夹中,结构如下: + + ``` + |--- CNNCTC/ + |--- cnnctc_data/ + |--- ST/ + data.mdb + lock.mdb + |--- MJ/ + data.mdb + lock.mdb + |--- IIIT/ + data.mdb + lock.mdb + ...... + ``` + +- 步骤4: + + 预处理数据集: + + ```bash + cd ocr_evaluate/cnn_ctc + python src/preprocess_dataset.py + ``` + + 这个过程大概需要75分钟。 + + 预处理后的数据集为.lmdb格式,以键值对方式存储: + + | key | value | + | ----------- | ---------------------- | + | label-%09d | 图片的真实标签 | + | image-%09d | 原始图片数据 | + | num-samples | lmdb数据集中的样本数量 | + + `%09d`为:长度为9的数字串。形如:label-000000001。 + + ##### 模型训练 + + 训练CNN-CTC模型,得到checkpoint文件: + + ```bash + cd ocr_evaluate/cnn_ctc + bash scripts/run_standalone_train_gpu.sh + ``` + +### 基于自然扰动serving生成评测数据集 + +1. 
+
+##### Model Training
+
+Train the CNN-CTC model to obtain a checkpoint file:
+
+```bash
+cd ocr_evaluate/cnn_ctc
+bash scripts/run_standalone_train_gpu.sh
+```
+
+### Generating the Evaluation Dataset with the Natural Perturbation Serving Service
+
+1. Start the natural perturbation serving service. For details, see [natural perturbation serving service](https://gitee.com/mindspore/mindarmour/blob/master/examples/natural_robustness/serving/README.md):
+
+    ```bash
+    cd serving/server/
+    python serving_server.py
+    ```
+
+2. Generate the evaluation dataset through the serving service.
+
+    1. In default_config.yaml, configure the path of the original test samples, `TEST_DATASET_PATH`, and the path of the generated perturbed dataset, `ADV_TEST_DATASET_PATH`. For example:
+
+        ```yaml
+        TEST_DATASET_PATH: "/opt/dataset/CNNCTC_data/MJ-ST-IIIT/IIIT5k_3000"
+        ADV_TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+        ```
+
+    2. Core code walkthrough:
+
+        1. Configure the perturbation methods. For the currently available methods and their parameters, see [image transform methods](https://gitee.com/mindspore/mindarmour/tree/master/mindarmour/natural_robustness/transform/image). Below is an example configuration.
+
+            ```python
+            perturb_config = [
+                {"method": "Contrast", "params": {"alpha": 1.5, "beta": 0}},
+                {"method": "GaussianBlur", "params": {"ksize": 5}},
+                {"method": "SaltAndPepperNoise", "params": {"factor": 0.05}},
+                {"method": "Translate", "params": {"x_bias": 0.1, "y_bias": -0.1}},
+                {"method": "Scale", "params": {"factor_x": 0.8, "factor_y": 0.8}},
+                {"method": "Shear", "params": {"factor": 1.5, "direction": "horizontal"}},
+                {"method": "Rotate", "params": {"angle": 30}},
+                {"method": "MotionBlur", "params": {"degree": 5, "angle": 45}},
+                {"method": "GradientBlur", "params": {"point": [50, 100], "kernel_num": 3, "center": True}},
+                {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], "start_point": [100, 150], "scope": 0.3, "bright_rate": 0.3, "pattern": "light", "mode": "circle"}},
+                {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], "start_point": [150, 200], "scope": 0.3, "pattern": "light", "mode": "horizontal"}},
+                {"method": "GradientLuminance", "params": {"color_start": [255, 255, 255], "color_end": [0, 0, 0], "start_point": [150, 200], "scope": 0.3, "pattern": "light", "mode": "vertical"}},
+                {"method": "Curve", "params": {"curves": 0.5, "depth": 3, "mode": "vertical"}},
+                {"method": "Perspective", "params": {"ori_pos": [[0, 0], [0, 800], [800, 0], [800, 800]], "dst_pos": [[10, 0], [0, 800], [790, 0], [800, 800]]}},
+            ]
+            ```
+
+        2. Prepare the data to be perturbed.
+
+            ```python
+            instances = []
+            methods_number = 1
+            outputs_number = 2
+            perturb_config = json.dumps(perturb_config)
+
+            env = lmdb.open(lmdb_paths, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
+
+            if not env:
+                print('cannot create lmdb from %s' % (lmdb_paths))
+                sys.exit(0)
+
+            with env.begin(write=False) as txn:
+                n_samples = int(txn.get('num-samples'.encode()))
+
+                # Filter out samples that are too long or contain illegal characters.
+                filtered_labels = []
+                filtered_index_list = []
+                for index in range(n_samples):
+                    index += 1  # lmdb starts with 1
+                    label_key = 'label-%09d'.encode() % index
+                    label = txn.get(label_key).decode('utf-8')
+
+                    if len(label) > max_len:
+                        continue
+                    illegal_sample = False
+                    for char_item in label.lower():
+                        if char_item not in config.CHARACTER:
+                            illegal_sample = True
+                            break
+                    if illegal_sample:
+                        continue
+
+                    filtered_labels.append(label)
+                    filtered_index_list.append(index)
+                    img_key = 'image-%09d'.encode() % index
+                    imgbuf = txn.get(img_key)
+                    instances.append({"img": imgbuf, 'perturb_config': perturb_config, "methods_number": methods_number,
+                                      "outputs_number": outputs_number})
+
+            print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')
+            ```
+
+        3. Send requests to the natural perturbation serving service and save the data it returns.
+
+            ```python
+            client = Client("10.113.216.54:5500", "perturbation", "natural_perturbation")
+            start_time = time.time()
+            result = client.infer(instances)
+            end_time = time.time()
+            print('generated natural perturbs images cost: ', end_time - start_time)
+            env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+
+            txn = env.begin(write=False)
+            with env_save.begin(write=True) as txn_save:
+                new_index = 1
+                for i, index in enumerate(filtered_index_list):
+                    try:
+                        file_names = result[i]['file_names'].split(';')
+                    except KeyError:
+                        # The serving service returned no images for this sample; skip it.
+                        print('index: ', index)
+                        print(result[i])
+                        continue
+                    length = result[i]['file_length'].tolist()
+                    before = 0
+                    label = filtered_labels[i]
+                    label = label.encode()
+                    img_key = 'image-%09d'.encode() % index
+                    ori_img = txn.get(img_key)
+                    names_dict = result[i]['names_dict']
+                    names_dict = json.loads(names_dict)
+                    for name, leng in zip(file_names, length):
+                        label_key = 'label-%09d'.encode() % new_index
+                        txn_save.put(label_key, label)
+                        img_key = 'image-%09d'.encode() % new_index
+                        adv_img = result[i]['results']
+                        adv_img = adv_img[before:before + leng]
+                        adv_img_key = 'adv_image-%09d'.encode() % new_index
+                        txn_save.put(img_key, ori_img)
+                        txn_save.put(adv_img_key, adv_img)
+
+                        adv_info_key = 'adv_info-%09d'.encode() % new_index
+                        adv_info = json.dumps(names_dict[name]).encode()
+                        txn_save.put(adv_info_key, adv_info)
+                        before = before + leng
+                        new_index += 1
+                txn_save.put("num-samples".encode(), str(new_index - 1).encode())
+            env.close()
+            ```
+
+    3. Run the perturbed sample generation script:
+
+        ```bash
+        python generate_adv_samples.py
+        ```
+
+    4. The generated natural perturbation data is in .lmdb format and contains the entries below; the sketch after this list shows how to read a sample back.
+
+        | key            | value                                              |
+        | -------------- | -------------------------------------------------- |
+        | label-%09d     | Ground-truth label of the image                    |
+        | image-%09d     | Original image data                                |
+        | adv_image-%09d | Generated perturbed image data                     |
+        | adv_info-%09d  | Perturbation info, including method and parameters |
+        | num-samples    | Number of samples in the lmdb dataset              |
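+
+To spot-check the generated dataset, one sample can be read back and decoded. The sketch below makes some assumptions: `lmdb_save_path` stands in for `ADV_TEST_DATASET_PATH`, and the stored image values are encoded image bytes that OpenCV can decode.
+
+```python
+import json
+
+import cv2
+import lmdb
+import numpy as np
+
+lmdb_save_path = "data"  # hypothetical ADV_TEST_DATASET_PATH
+
+env = lmdb.open(lmdb_save_path, max_readers=32, readonly=True, lock=False)
+with env.begin(write=False) as txn:
+    index = 1  # keys are 1-indexed
+    label = txn.get('label-%09d'.encode() % index).decode('utf-8')
+    adv_info = json.loads(txn.get('adv_info-%09d'.encode() % index).decode('utf-8'))
+    adv_buf = txn.get('adv_image-%09d'.encode() % index)
+    # Decode the raw bytes into an image array for visual inspection.
+    adv_img = cv2.imdecode(np.frombuffer(adv_buf, dtype=np.uint8), cv2.IMREAD_COLOR)
+    if adv_img is not None:
+        print('label:', label, '| perturbation:', adv_info, '| shape:', adv_img.shape)
+env.close()
+```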
+
+### Running CNN-CTC Inference on the Perturbed Dataset
+
+1. In default_config.yaml, set the test dataset path `TEST_DATASET_PATH` to the same value as the perturbed dataset path `ADV_TEST_DATASET_PATH`. For example:
+
+    ```yaml
+    TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+    ADV_TEST_DATASET_PATH: "/home/mindarmour/examples/natural_robustness/ocr_evaluate/data"
+    ```
+
+2. Core script walkthrough:
+
+    1. Load the model and the dataset:
+
+        ```python
+        ds = test_dataset_creator(is_adv=config.IS_ADV)
+        net = CNNCTC(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
+
+        ckpt_path = config.CHECKPOINT_PATH
+        param_dict = load_checkpoint(ckpt_path)
+        load_param_into_net(net, param_dict)
+        print('parameters loaded! from: ', ckpt_path)
+        ```
+
+    2. Run inference and save the model's predictions for both the original and the perturbed samples:
+
+        ```python
+        env_save = lmdb.open(lmdb_save_path, map_size=1099511627776)
+        count = 0
+        correct_count = 0
+        with env_save.begin(write=True) as txn_save:
+            for data in ds.create_tuple_iterator():
+                img, _, text, _, length = data
+
+                img_tensor = Tensor(img, mstype.float32)
+                model_predict = net(img_tensor)
+                model_predict = np.squeeze(model_predict.asnumpy())
+
+                preds_size = np.array([model_predict.shape[1]] * config.TEST_BATCH_SIZE)
+                preds_index = np.argmax(model_predict, 2)
+                preds_index = np.reshape(preds_index, [-1])
+                preds_str = converter.decode(preds_index, preds_size)
+                label_str = converter.reverse_encode(text.asnumpy(), length.asnumpy())
+
+                print("Prediction samples: \n", preds_str[:5])
+                print("Ground truth: \n", label_str[:5])
+                for pred, label in zip(preds_str, label_str):
+                    if pred == label:
+                        correct_count += 1
+                    count += 1
+                    if config.IS_ADV:
+                        pred_key = 'adv_pred-%09d'.encode() % count
+                    else:
+                        pred_key = 'pred-%09d'.encode() % count
+
+                    txn_save.put(pred_key, pred.encode())
+        accuracy = correct_count / count
+        ```
+
+3. Run the eval_and_save.py script:
+
+    ```bash
+    python eval_and_save.py
+    ```
+
+    CNN-CTC runs inference on the generated perturbed dataset and saves its prediction for every sample in `ADV_TEST_DATASET_PATH`.
+
+    New entries added to the dataset:
+
+    | Key           | Value                                         |
+    | ------------- | --------------------------------------------- |
+    | pred-%09d     | Model prediction for the original image data  |
+    | adv_pred-%09d | Model prediction for the perturbed image data |
+
+    Model predictions on the original samples:
+
+    ```bash
+    Prediction samples:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Ground truth:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Prediction samples:
+     ['venus', 'venus', 'its', 'its', 'the']
+    Ground truth:
+     ['venus', 'venus', 'its', 'its', 'the']
+    Prediction samples:
+     ['summer', 'summer', 'joeys', 'joeys', 'think']
+    Ground truth:
+     ['summer', 'summer', 'joes', 'joes', 'think']
+    ...
+    ```
+
+    Model predictions on the naturally perturbed samples:
+
+    ```bash
+    Prediction samples:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Ground truth:
+     ['private', 'private', 'parking', 'parking', 'salutes']
+    Prediction samples:
+     ['dams', 'vares', 'its', 'its', 'the']
+    Ground truth:
+     ['venus', 'venus', 'its', 'its', 'the']
+    Prediction samples:
+     ['sune', 'summer', '', 'joeys', 'think']
+    Ground truth:
+     ['summer', 'summer', 'joes', 'joes', 'think']
+    ...
+    ```
+
+    Model accuracy on the original test dataset and on the naturally perturbed dataset:
+
+    ```bash
+    num of samples in IIIT dataset: 5952
+    Accuracy of benign sample: 0.8546195652173914
+    Accuracy of perturbed sample: 0.6126019021739131
+    ```
+
+### Robustness Analysis
+
+Run the analyse.py script to statistically analyse how the CNN-CTC model performs on the perturbed dataset:
+
+```bash
+python analyse.py
+```
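+
+analyse.py cross-references, for every sample, the ground truth with the saved `pred-%09d` and `adv_pred-%09d` entries, and groups the errors by the perturbation recorded in `adv_info-%09d`. The following is only a rough sketch of that tallying logic, not the actual implementation; `lmdb_save_path` and the exact structure of the perturbation info record are assumptions.
+
+```python
+import json
+from collections import defaultdict
+
+import lmdb
+
+lmdb_save_path = "data"  # hypothetical ADV_TEST_DATASET_PATH
+
+# Per-method counters mirroring the statistics printed below.
+stats = defaultdict(lambda: {"total": 0, "wrong": 0, "ori_right_adv_wrong": 0, "both_wrong": 0})
+
+env = lmdb.open(lmdb_save_path, max_readers=32, readonly=True, lock=False)
+with env.begin(write=False) as txn:
+    n_samples = int(txn.get('num-samples'.encode()))
+    for index in range(1, n_samples + 1):
+        label = txn.get('label-%09d'.encode() % index).decode('utf-8')
+        pred = txn.get('pred-%09d'.encode() % index).decode('utf-8')
+        adv_pred = txn.get('adv_pred-%09d'.encode() % index).decode('utf-8')
+        adv_info = json.loads(txn.get('adv_info-%09d'.encode() % index).decode('utf-8'))
+        # Assumed layout: the first field of the perturbation info names the method.
+        method = str(adv_info[0]) if isinstance(adv_info, (list, tuple)) else str(adv_info)
+        entry = stats[method]
+        entry["total"] += 1
+        if adv_pred != label:
+            entry["wrong"] += 1
+            if pred == label:
+                entry["ori_right_adv_wrong"] += 1
+            else:
+                entry["both_wrong"] += 1
+env.close()
+
+for method, counters in stats.items():
+    print(method, counters)
+```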
+
+Analysis results:
+
+```bash
+Number of samples in analyse dataset: 5952
+Accuracy of original dataset: 0.46127717391304346
+Accuracy of adversarial dataset: 0.6126019021739131
+Number of samples correctly predicted in original dataset but wrong in adversarial dataset: 832
+Number of samples both wrong predicted in original and adversarial dataset: 1449
+------------------------------------------------------------------------------
+Method Shear
+Number of perturb samples: 442
+Number of wrong predicted: 351
+Number of correctly predicted in origin dataset but wrong in adversarial: 153
+Number of both wrong predicted in origin and adversarial dataset: 198
+------------------------------------------------------------------------------
+Method Contrast
+Number of perturb samples: 387
+Number of wrong predicted: 57
+Number of correctly predicted in origin dataset but wrong in adversarial: 8
+Number of both wrong predicted in origin and adversarial dataset: 49
+------------------------------------------------------------------------------
+Method GaussianBlur
+Number of perturb samples: 436
+Number of wrong predicted: 181
+Number of correctly predicted in origin dataset but wrong in adversarial: 71
+Number of both wrong predicted in origin and adversarial dataset: 110
+------------------------------------------------------------------------------
+Method MotionBlur
+Number of perturb samples: 458
+Number of wrong predicted: 215
+Number of correctly predicted in origin dataset but wrong in adversarial: 92
+Number of both wrong predicted in origin and adversarial dataset: 123
+------------------------------------------------------------------------------
+Method GradientLuminance
+Number of perturb samples: 1243
+Number of wrong predicted: 154
+Number of correctly predicted in origin dataset but wrong in adversarial: 4
+Number of both wrong predicted in origin and adversarial dataset: 150
+------------------------------------------------------------------------------
+Method Rotate
+Number of perturb samples: 405
+Number of wrong predicted: 298
+Number of correctly predicted in origin dataset but wrong in adversarial: 136
+Number of both wrong predicted in origin and adversarial dataset: 162
+------------------------------------------------------------------------------
+Method SaltAndPepperNoise
+Number of perturb samples: 413
+Number of wrong predicted: 116
+Number of correctly predicted in origin dataset but wrong in adversarial: 29
+Number of both wrong predicted in origin and adversarial dataset: 87
+------------------------------------------------------------------------------
+Method Translate
+Number of perturb samples: 419
+Number of wrong predicted: 159
+Number of correctly predicted in origin dataset but wrong in adversarial: 57
+Number of both wrong predicted in origin and adversarial dataset: 102
+------------------------------------------------------------------------------
+Method GradientBlur
+Number of perturb samples: 440
+Number of wrong predicted: 92
+Number of correctly predicted in origin dataset but wrong in adversarial: 26
+Number of both wrong predicted in origin and adversarial dataset: 66
+------------------------------------------------------------------------------
+Method Perspective
+Number of perturb samples: 401
+Number of wrong predicted: 181
+Number of correctly predicted in origin dataset but wrong in adversarial: 75
+Number of both wrong predicted in origin and adversarial dataset: 106
+------------------------------------------------------------------------------
+Method Curve
+Number of perturb samples: 410
+Number of wrong predicted: 361
+Number of correctly predicted in origin dataset but wrong in adversarial: 162
+Number of both wrong predicted in origin and adversarial dataset: 199
+------------------------------------------------------------------------------
+Method Scale
+Number of perturb samples: 434
+Number of wrong predicted: 116
+Number of correctly predicted in origin dataset but wrong in adversarial: 19
+Number of both wrong predicted in origin and adversarial dataset: 97
+------------------------------------------------------------------------------
+```
+
+The analysis results include:
+
+1. Number of evaluated samples: 5888
+2. Accuracy of the CNN-CTC model on the original dataset: 85.4%
+3. Accuracy of the CNN-CTC model on the perturbed dataset: 57.2%
+4. Number of samples predicted correctly on the original image but wrongly after perturbation: 1736
+5. Number of samples predicted wrongly on both the original and the perturbed image: 782
+6. For each perturbation method: the number of samples, the number of perturbed samples predicted wrongly, the number predicted correctly on the original sample but wrongly after perturbation, and the number predicted wrongly on both the original and the perturbed sample.
+
+A high error rate on images perturbed by a particular method indicates that the CNN-CTC model is not robust to that method, and targeted improvement is recommended. For Rotate, Curve, MotionBlur, and Shear, most perturbed images are predicted wrongly, so these methods deserve further analysis.
+
+Three folders are also generated under `ADV_TEST_DATASET_PATH`:
+
+```
+adv_wrong_pred            # samples whose perturbed image is misclassified
+ori_corret_adv_wrong_pred # samples whose original image is classified correctly but whose perturbed image is misclassified
+ori_wrong_adv_wrong_pred  # samples whose original and perturbed images are both misclassified
+```
+
+Each folder is organised by perturbation method:
+
+![1646730529400](image/catalog.png)
+
+Each image is named in the format ground truth-prediction.png, as shown below:
+
+![1646812837049](image/name_format.png)
+
+The stored images support further analysis of whether a wrong prediction stems from model quality, image quality, or a perturbation method that changes the semantics of the image.
+
+![1646812837049](image/result_demo.png)
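+
+The folder layout and naming convention described above could be reproduced with a helper along these lines; a minimal sketch in which the root path, method name, and image variable are all hypothetical:
+
+```python
+import os
+
+import cv2
+
+def save_wrong_prediction(root, method, label, pred, image):
+    """Save a misclassified image as <root>/<method>/<label>-<pred>.png."""
+    folder = os.path.join(root, method)
+    os.makedirs(folder, exist_ok=True)
+    cv2.imwrite(os.path.join(folder, '%s-%s.png' % (label, pred)), image)
+
+# Hypothetical usage for a perturbed sample the model got wrong:
+# save_wrong_prediction('data/adv_wrong_pred', 'Rotate', 'summer', 'sune', adv_img)
+```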