| @@ -21,23 +21,24 @@ import json | |||
| import os | |||
| from flask import Blueprint | |||
| from flask import Response | |||
| from flask import jsonify | |||
| from flask import request | |||
| from flask import Response | |||
| from marshmallow import ValidationError | |||
| from mindinsight.conf import settings | |||
| from mindinsight.datavisual.utils.tools import get_train_id, get_profiler_dir, \ | |||
| unquote_args, to_int, get_device_id | |||
| from mindinsight.datavisual.utils.tools import get_train_id, get_profiler_dir, to_int, get_device_id | |||
| from mindinsight.datavisual.utils.tools import unquote_args | |||
| from mindinsight.profiler.analyser.analyser_factory import AnalyserFactory | |||
| from mindinsight.profiler.analyser.minddata_analyser import MinddataAnalyser | |||
| from mindinsight.profiler.common.exceptions.exceptions import ProfilerFileNotFoundException | |||
| from mindinsight.profiler.proposer.compose_proposer import ComposeProposal | |||
| from mindinsight.profiler.common.util import analyse_device_list_from_profiler_dir | |||
| from mindinsight.profiler.common.validator.validate import validate_condition, \ | |||
| validate_ui_proc, validate_minddata_pipeline_condition | |||
| from mindinsight.profiler.common.validator.validate import validate_condition, validate_ui_proc | |||
| from mindinsight.profiler.common.validator.validate import validate_minddata_pipeline_condition | |||
| from mindinsight.profiler.common.validator.validate_path import \ | |||
| validate_and_normalize_profiler_path, validate_and_normalize_path | |||
| validate_and_normalize_path | |||
| from mindinsight.profiler.common.validator.validate_path import validate_and_normalize_profiler_path | |||
| from mindinsight.profiler.proposer.compose_proposer import ComposeProposal | |||
| from mindinsight.utils.exceptions import ParamValueError | |||
| BLUEPRINT = Blueprint("profile", __name__, url_prefix=settings.URL_PREFIX) | |||
| @@ -21,6 +21,9 @@ from mindinsight.profiler.analyser.base_analyser import BaseAnalyser | |||
| class MinddataAnalyser(BaseAnalyser): | |||
| """The Minddata profiling analyser.""" | |||
| DEVICE_QUEUE_EMPTY_WARNING_THRESHOLD = 0.7 | |||
| DEVICE_QUEUE_NOT_EMPTY_THRESHOLD = 0.95 | |||
| def analyse_get_next_info(self, info_type="all"): | |||
| """ | |||
| Analyse the get_next operation info. | |||
| @@ -59,7 +62,7 @@ class MinddataAnalyser(BaseAnalyser): | |||
| one_step_cost_time = (float(node_info[2]) - float(node_info[1]))/1e3 | |||
| time_list.append(one_step_cost_time) | |||
| total_cost += one_step_cost_time | |||
| if info_type in ["all", "time"]: | |||
| if info_type in ["all", "queue"]: | |||
| queue_info["size"] = len(queue_size_list) | |||
| queue_info["info"] = {"queue": queue_size_list} | |||
| queue_info["summary"] = { | |||
| @@ -100,12 +103,12 @@ class MinddataAnalyser(BaseAnalyser): | |||
| queue_size_list = [] | |||
| empty_step, full_step = 0, 0 | |||
| device_queue_file_name = "device_queue_profiling" + self._device_id + ".txt" | |||
| device_queue_file_name = "device_queue_profiling_" + self._device_id + ".txt" | |||
| device_queue_file_path = MinddataAnalyser.find_target_file(self._profiling_dir, device_queue_file_name) | |||
| feed_file_name = "dataset_iterator_profiling_" + self._device_id + ".txt" | |||
| feed_file_path = MinddataAnalyser.find_target_file(self._profiling_dir, feed_file_name) | |||
| if device_queue_file_path: | |||
| file_path = device_queue_file_name | |||
| file_path = device_queue_file_path | |||
| elif not device_queue_file_path and feed_file_path: | |||
| file_path = feed_file_path | |||
| else: | |||
| @@ -169,15 +172,12 @@ class MinddataAnalyser(BaseAnalyser): | |||
| Returns: | |||
| dict, the summary of queue. | |||
| """ | |||
| if not get_next_queue_info and not device_queue_info: | |||
| return {} | |||
| get_next_queue_empty_count = 0 | |||
| if get_next_queue_info: | |||
| if get_next_queue_info and device_queue_info: | |||
| result = {"data_process": {"status": "normal"}, | |||
| "device_queue_op": {"status": "normal"}, | |||
| "tdt": {"status": "normal"}, | |||
| "get_next": {"status": "normal"}} | |||
| get_next_queue_empty_count = get_next_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("empty_queue", 0) | |||
| result["get_next_queue_info"] = { | |||
| @@ -186,27 +186,49 @@ class MinddataAnalyser(BaseAnalyser): | |||
| "total_batch": get_next_queue_info.get("size") | |||
| } | |||
| } | |||
| else: | |||
| device_queue_empty_count = device_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("empty_queue", 0) | |||
| device_queue_full_count = device_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("full_queue", 0) | |||
| result["device_queue_info"] = { | |||
| "summary": { | |||
| "empty_batch_count": device_queue_empty_count, | |||
| "full_batch_count": device_queue_full_count, | |||
| "total_batch": device_queue_info.get("size") | |||
| } | |||
| } | |||
| if get_next_queue_empty_count: | |||
| if device_queue_empty_count > device_queue_info.get("size", 0)*\ | |||
| MinddataAnalyser.DEVICE_QUEUE_EMPTY_WARNING_THRESHOLD: | |||
| result["data_process"]["status"] = "warning" | |||
| elif device_queue_empty_count < device_queue_info.get("size", 0)*\ | |||
| MinddataAnalyser.DEVICE_QUEUE_NOT_EMPTY_THRESHOLD: | |||
| result["tdt"]["status"] = "warning" | |||
| result["device_queue_op"]["status"] = "warning" | |||
| elif device_queue_info and not get_next_queue_info: | |||
| result = {"data_process": {"status": "normal"}, | |||
| "fpbp": {"status": "normal"}} | |||
| device_queue_empty_count = device_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("empty_queue", 0) | |||
| device_queue_full_count = device_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("full_queue", 0) | |||
| result["device_queue_info"] = { | |||
| "summary": { | |||
| "empty_batch_count": device_queue_empty_count, | |||
| "full_batch_count": device_queue_full_count, | |||
| "total_batch": device_queue_info.get("size") | |||
| device_queue_empty_count = device_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("empty_queue", 0) | |||
| device_queue_full_count = device_queue_info.get( | |||
| "summary", {}).get("queue_summary", {}).get("full_queue", 0) | |||
| result["device_queue_info"] = { | |||
| "summary": { | |||
| "empty_batch_count": device_queue_empty_count, | |||
| "full_batch_count": device_queue_full_count, | |||
| "total_batch": device_queue_info.get("size") | |||
| } | |||
| } | |||
| } | |||
| if not get_next_queue_info or (get_next_queue_info and get_next_queue_empty_count == 0): | |||
| if device_queue_empty_count > device_queue_info.get("size", 0)*0.7: | |||
| result["data_process"]["status"] = "warning" | |||
| elif device_queue_empty_count < device_queue_info.get("size", 0)*0.9: | |||
| result["fpbp"]["status"] = "warning" | |||
| else: | |||
| result = {} | |||
| return result | |||
| @@ -14,9 +14,10 @@ | |||
| # ============================================================================ | |||
| """All proposers.""" | |||
| from mindinsight.profiler.proposer.allproposers.common_proposer import CommonProposer | |||
| from mindinsight.profiler.proposer.allproposers.minddata_proposer import MinddataProposer | |||
| from mindinsight.profiler.proposer.allproposers.step_trace_proposer import StepTraceProposer | |||
| from mindinsight.profiler.proposer.allproposers.minddata_pipeline_proposer import \ | |||
| MinddataPipelineProposer | |||
| __all__ = ["CommonProposer", "StepTraceProposer", "MinddataPipelineProposer"] | |||
| __all__ = ["CommonProposer", "StepTraceProposer", "MinddataProposer", "MinddataPipelineProposer"] | |||
| @@ -0,0 +1,84 @@ | |||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||
| # | |||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||
| # you may not use this file except in compliance with the License. | |||
| # You may obtain a copy of the License at | |||
| # | |||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||
| # | |||
| # Unless required by applicable law or agreed to in writing, software | |||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| # See the License for the specific language governing permissions and | |||
| # limitations under the License. | |||
| # ============================================================================ | |||
| """The minddata proposer.""" | |||
| from collections import OrderedDict | |||
| from mindinsight.profiler.analyser.analyser_factory import AnalyserFactory | |||
| from mindinsight.profiler.analyser.minddata_analyser import MinddataAnalyser | |||
| from mindinsight.profiler.proposer.allproposers.base_proposer import Proposer | |||
| class MinddataProposer(Proposer): | |||
| """The Minddata proposer.""" | |||
| def __init__(self, profiling_dir, device_id): | |||
| super().__init__(profiling_dir, device_id) | |||
| self.__proposer_type = "minddata" | |||
| self.__proposal_dict = OrderedDict() | |||
| def analyze(self, options=None): | |||
| """ | |||
| Get the proposal from proposer. | |||
| Args: | |||
| options (dict): The options for proposer analysis. | |||
| Returns: | |||
| dict, the proposal from proposer instance, the dictionary key is a language internationalization | |||
| label, and the value is used to format the value in the language internationalization string. | |||
| Examples: | |||
| >>> proposer_type = 'minddata' | |||
| >>> proposer = ProposerFactory.instance().get_proposer(proposer_type, self.profiling_dir, self.device_id) | |||
| >>> result = proposer.analyze(options) | |||
| """ | |||
| self.minddata_outer_bounds_analyze() | |||
| return self.__proposal_dict | |||
| def minddata_outer_bounds_analyze(self): | |||
| """Get the proposals of minddata outer bounds.""" | |||
| minddata_dict = OrderedDict() | |||
| minddata_analyser = AnalyserFactory.instance().get_analyser( | |||
| 'minddata', self.profiling_path, self.device_id) | |||
| get_next_queue_info, _ = minddata_analyser.analyse_get_next_info(info_type="queue") | |||
| device_queue_info, _ = minddata_analyser.analyse_device_queue_info(info_type="queue") | |||
| result = MinddataAnalyser.analyse_queue_summary(get_next_queue_info, device_queue_info) | |||
| if "get_next_queue_info" in result: | |||
| get_next_queue_info_summary = result.get("get_next_queue_info").get("summary", {}) | |||
| empty_batch = get_next_queue_info_summary.get("empty_batch_count") | |||
| total_batch = get_next_queue_info_summary.get("total_batch") | |||
| minddata_dict["minddata_get_next_queue"] = [empty_batch, total_batch] | |||
| self.__proposal_dict.update(minddata_dict) | |||
| if "device_queue_info" in result: | |||
| get_next_queue_info_summary = result.get("device_queue_info").get("summary", {}) | |||
| full_batch = get_next_queue_info_summary.get("full_batch_count", 0) | |||
| empty_batch = get_next_queue_info_summary.get("empty_batch_count", 0) | |||
| total_batch = get_next_queue_info_summary.get("total_batch", 0) | |||
| minddata_dict["minddata_device_queue"] = [empty_batch, total_batch, full_batch, total_batch] | |||
| self.__proposal_dict.update(minddata_dict) | |||
| warning_op = list() | |||
| for key, value in result.items(): | |||
| if isinstance(value, dict): | |||
| status = value.get("status") | |||
| if status == "warning": | |||
| warning_op.append(key) | |||
| if warning_op: | |||
| minddata_dict["minddata_warning_op"] = [",".join(warning_op)] | |||
| self.__proposal_dict.update(minddata_dict) | |||