Commits (tag v1.1.0):
- bugfix and add pillow to requirements.txt
- modify summary format
- bugfix: use sample_id in summary
- fix CI problem: url encode '/' as well
- fix ut
- fix uncertainty enable checking
- fix review comment
- enhance exception raising
- enhance comment
@@ -31,6 +31,7 @@ from mindinsight.datavisual.data_transform.summary_watcher import SummaryWatcher
 from mindinsight.datavisual.utils.tools import get_train_id
 from mindinsight.explainer.manager.explain_manager import ExplainManager
 from mindinsight.explainer.encapsulator.explain_job_encap import ExplainJobEncap
+from mindinsight.explainer.encapsulator.datafile_encap import DatafileEncap
 from mindinsight.explainer.encapsulator.saliency_encap import SaliencyEncap
 from mindinsight.explainer.encapsulator.evaluation_encap import EvaluationEncap

@@ -54,12 +55,14 @@ class ExplainManagerHolder:
         cls.static_instance.start_load_data()

-def _image_url_formatter(train_id, image_id, image_type):
+def _image_url_formatter(train_id, image_path, image_type):
     """Returns image url."""
-    train_id = urllib.parse.quote(str(train_id))
-    image_id = urllib.parse.quote(str(image_id))
-    image_type = urllib.parse.quote(str(image_type))
-    return f"{URL_PREFIX}/explainer/image?train_id={train_id}&image_id={image_id}&type={image_type}"
+    data = {
+        "train_id": train_id,
+        "path": image_path,
+        "type": image_type
+    }
+    return f"{URL_PREFIX}/explainer/image?{urllib.parse.urlencode(data)}"

 def _read_post_request(post_request):

@@ -129,10 +132,10 @@ def query_saliency():
     sorted_name = data.get("sorted_name", "")
     sorted_type = data.get("sorted_type", "descending")
-    if sorted_name not in ("", "confidence"):
-        raise ParamValueError("sorted_name")
+    if sorted_name not in ("", "confidence", "uncertainty"):
+        raise ParamValueError(f"sorted_name: {sorted_name}, valid options: '' 'confidence' 'uncertainty'")
     if sorted_type not in ("ascending", "descending"):
-        raise ParamValueError("sorted_type")
| raise ParamValueError(f"sorted_type: {sorted_type}, valid options: 'confidence' 'uncertainty'") | |||||
     encapsulator = SaliencyEncap(
         _image_url_formatter,

@@ -170,19 +173,19 @@ def query_image():
     train_id = get_train_id(request)
     if train_id is None:
         raise ParamMissError("train_id")
-    image_id = request.args.get("image_id")
-    if image_id is None:
-        raise ParamMissError("image_id")
+    image_path = request.args.get("path")
+    if image_path is None:
+        raise ParamMissError("path")
     image_type = request.args.get("type")
     if image_type is None:
         raise ParamMissError("type")
     if image_type not in ("original", "overlay"):
-        raise ParamValueError(f"type:{image_type}")
+        raise ParamValueError(f"type:{image_type}, valid options: 'original' 'overlay'")
-    encapsulator = ExplainJobEncap(ExplainManagerHolder.get_instance())
-    image = encapsulator.query_image_binary(train_id, image_id, image_type)
+    encapsulator = DatafileEncap(ExplainManagerHolder.get_instance())
+    image = encapsulator.query_image_binary(train_id, image_path, image_type)
     if image is None:
-        raise ImageNotExistError(f"image_id:{image_id}")
+        raise ImageNotExistError(f"{image_path}")
     return image
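Note: the switch from hand-rolled quoting to urllib.parse.urlencode is what the "url encode '/' as well" commit refers to; urlencode percent-encodes every value (via quote_plus), so a '/' inside train_id or path survives the query string. A minimal sketch of the resulting URL, not part of the patch; the URL_PREFIX value here is assumed for illustration only:

```python
import urllib.parse

URL_PREFIX = "/v1/mindinsight"  # assumed value, for illustration only


def image_url(train_id, image_path, image_type):
    """Mirror of the new _image_url_formatter: every value is percent-encoded."""
    data = {"train_id": train_id, "path": image_path, "type": image_type}
    return f"{URL_PREFIX}/explainer/image?{urllib.parse.urlencode(data)}"


print(image_url("./mock_job_1", "samples/1/original.png", "original"))
# /v1/mindinsight/explainer/image?train_id=.%2Fmock_job_1&path=samples%2F1%2Foriginal.png&type=original
```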
@@ -108,35 +108,40 @@ message Summary {
 message Explain {
     message Inference{
-        repeated float ground_truth_prob = 1;
-        repeated int32 predicted_label = 2;
-        repeated float predicted_prob = 3;
+        repeated float ground_truth_prob = 1;
+        repeated int32 predicted_label = 2;
+        repeated float predicted_prob = 3;
+        repeated float ground_truth_prob_sd = 4;
+        repeated float ground_truth_prob_itl95_low = 5;
+        repeated float ground_truth_prob_itl95_hi = 6;
+        repeated float predicted_prob_sd = 7;
+        repeated float predicted_prob_itl95_low = 8;
+        repeated float predicted_prob_itl95_hi = 9;
     }
     message Explanation{
-        optional string explain_method = 1;
-        optional int32 label = 2;
-        optional bytes heatmap = 3;
-    }
+        optional string explain_method = 1;
+        optional int32 label = 2;
+        optional string heatmap_path = 3;
+    }
     message Benchmark{
-        optional string benchmark_method = 1;
-        optional string explain_method = 2;
-        optional float total_score = 3;
-        repeated float label_score = 4;
-    }
+        optional string benchmark_method = 1;
+        optional string explain_method = 2;
+        optional float total_score = 3;
+        repeated float label_score = 4;
+    }
     message Metadata{
-        repeated string label = 1;
-        repeated string explain_method = 2;
-        repeated string benchmark_method = 3;
-    }
+        repeated string label = 1;
+        repeated string explain_method = 2;
+        repeated string benchmark_method = 3;
+    }
-    optional string image_id = 1;    // The Metadata and image id must have one fill in
-    optional bytes image_data = 2;
+    optional int32 sample_id = 1;    // Either metadata or sample_id must be filled in
+    optional string image_path = 2;
     repeated int32 ground_truth_label = 3;
     optional Inference inference = 4;
     repeated Explanation explanation = 5;
     repeated Benchmark benchmark = 6;
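Note: a sketch of how a writer could populate the reshaped Explain message. This is not part of the patch; the import path and all values are placeholders, and only fields declared in the proto above are used.

```python
# The module name below stands in for wherever the code generated from
# mindinsight_summary.proto lives in a given build.
import mindinsight_summary_pb2 as summary_pb2

explain = summary_pb2.Explain()
explain.sample_id = 42                                # int32 index, replaces the old string image_id
explain.image_path = "samples/42/original.png"        # file path, replaces embedded image_data bytes
explain.ground_truth_label.append(3)

inference = explain.inference
inference.ground_truth_prob.append(0.91)
# New uncertainty statistics: standard deviation plus 95% interval bounds.
inference.ground_truth_prob_sd.append(0.02)
inference.ground_truth_prob_itl95_low.append(0.87)
inference.ground_truth_prob_itl95_hi.append(0.95)

explanation = explain.explanation.add()
explanation.explain_method = "Gradient"
explanation.label = 3
explanation.heatmap_path = "samples/42/Gradient_3.png"  # file path, replaces embedded heatmap bytes
```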
@@ -19,7 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
   package='mindinsight',
   syntax='proto2',
   serialized_options=b'\370\001\001',
| serialized_pb=b'\n\x19mindinsight_summary.proto\x12\x0bmindinsight\x1a\x18mindinsight_anf_ir.proto\"\xc3\x01\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x02(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x11\n\x07version\x18\x03 \x01(\tH\x00\x12,\n\tgraph_def\x18\x04 \x01(\x0b\x32\x17.mindinsight.GraphProtoH\x00\x12\'\n\x07summary\x18\x05 \x01(\x0b\x32\x14.mindinsight.SummaryH\x00\x12\'\n\x07\x65xplain\x18\x06 \x01(\x0b\x32\x14.mindinsight.ExplainH\x00\x42\x06\n\x04what\"\xc0\x04\n\x07Summary\x12)\n\x05value\x18\x01 \x03(\x0b\x32\x1a.mindinsight.Summary.Value\x1aQ\n\x05Image\x12\x0e\n\x06height\x18\x01 \x02(\x05\x12\r\n\x05width\x18\x02 \x02(\x05\x12\x12\n\ncolorspace\x18\x03 \x02(\x05\x12\x15\n\rencoded_image\x18\x04 \x02(\x0c\x1a\xf0\x01\n\tHistogram\x12\x36\n\x07\x62uckets\x18\x01 \x03(\x0b\x32%.mindinsight.Summary.Histogram.bucket\x12\x11\n\tnan_count\x18\x02 \x01(\x03\x12\x15\n\rpos_inf_count\x18\x03 \x01(\x03\x12\x15\n\rneg_inf_count\x18\x04 \x01(\x03\x12\x0b\n\x03max\x18\x05 \x01(\x01\x12\x0b\n\x03min\x18\x06 \x01(\x01\x12\x0b\n\x03sum\x18\x07 \x01(\x01\x12\r\n\x05\x63ount\x18\x08 \x01(\x03\x1a\x34\n\x06\x62ucket\x12\x0c\n\x04left\x18\x01 \x02(\x01\x12\r\n\x05width\x18\x02 \x02(\x01\x12\r\n\x05\x63ount\x18\x03 \x02(\x03\x1a\xc3\x01\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x16\n\x0cscalar_value\x18\x03 \x01(\x02H\x00\x12+\n\x05image\x18\x04 \x01(\x0b\x32\x1a.mindinsight.Summary.ImageH\x00\x12*\n\x06tensor\x18\x08 \x01(\x0b\x32\x18.mindinsight.TensorProtoH\x00\x12\x33\n\thistogram\x18\t \x01(\x0b\x32\x1e.mindinsight.Summary.HistogramH\x00\x42\x07\n\x05value\"\xff\x04\n\x07\x45xplain\x12\x10\n\x08image_id\x18\x01 \x01(\t\x12\x12\n\nimage_data\x18\x02 \x01(\x0c\x12\x1a\n\x12ground_truth_label\x18\x03 \x03(\x05\x12\x31\n\tinference\x18\x04 \x01(\x0b\x32\x1e.mindinsight.Explain.Inference\x12\x35\n\x0b\x65xplanation\x18\x05 \x03(\x0b\x32 .mindinsight.Explain.Explanation\x12\x31\n\tbenchmark\x18\x06 \x03(\x0b\x32\x1e.mindinsight.Explain.Benchmark\x12/\n\x08metadata\x18\x07 \x01(\x0b\x32\x1d.mindinsight.Explain.Metadata\x12\x0e\n\x06status\x18\x08 \x01(\t\x1aW\n\tInference\x12\x19\n\x11ground_truth_prob\x18\x01 \x03(\x02\x12\x17\n\x0fpredicted_label\x18\x02 \x03(\x05\x12\x16\n\x0epredicted_prob\x18\x03 \x03(\x02\x1a\x45\n\x0b\x45xplanation\x12\x16\n\x0e\x65xplain_method\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x0f\n\x07heatmap\x18\x03 \x01(\x0c\x1ag\n\tBenchmark\x12\x18\n\x10\x62\x65nchmark_method\x18\x01 \x01(\t\x12\x16\n\x0e\x65xplain_method\x18\x02 \x01(\t\x12\x13\n\x0btotal_score\x18\x03 \x01(\x02\x12\x13\n\x0blabel_score\x18\x04 \x03(\x02\x1aK\n\x08Metadata\x12\r\n\x05label\x18\x01 \x03(\t\x12\x16\n\x0e\x65xplain_method\x18\x02 \x03(\t\x12\x18\n\x10\x62\x65nchmark_method\x18\x03 \x03(\tB\x03\xf8\x01\x01' | |||||
| serialized_pb=b'\n\x19mindinsight_summary.proto\x12\x0bmindinsight\x1a\x18mindinsight_anf_ir.proto\"\xc3\x01\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x02(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x11\n\x07version\x18\x03 \x01(\tH\x00\x12,\n\tgraph_def\x18\x04 \x01(\x0b\x32\x17.mindinsight.GraphProtoH\x00\x12\'\n\x07summary\x18\x05 \x01(\x0b\x32\x14.mindinsight.SummaryH\x00\x12\'\n\x07\x65xplain\x18\x06 \x01(\x0b\x32\x14.mindinsight.ExplainH\x00\x42\x06\n\x04what\"\xc0\x04\n\x07Summary\x12)\n\x05value\x18\x01 \x03(\x0b\x32\x1a.mindinsight.Summary.Value\x1aQ\n\x05Image\x12\x0e\n\x06height\x18\x01 \x02(\x05\x12\r\n\x05width\x18\x02 \x02(\x05\x12\x12\n\ncolorspace\x18\x03 \x02(\x05\x12\x15\n\rencoded_image\x18\x04 \x02(\x0c\x1a\xf0\x01\n\tHistogram\x12\x36\n\x07\x62uckets\x18\x01 \x03(\x0b\x32%.mindinsight.Summary.Histogram.bucket\x12\x11\n\tnan_count\x18\x02 \x01(\x03\x12\x15\n\rpos_inf_count\x18\x03 \x01(\x03\x12\x15\n\rneg_inf_count\x18\x04 \x01(\x03\x12\x0b\n\x03max\x18\x05 \x01(\x01\x12\x0b\n\x03min\x18\x06 \x01(\x01\x12\x0b\n\x03sum\x18\x07 \x01(\x01\x12\r\n\x05\x63ount\x18\x08 \x01(\x03\x1a\x34\n\x06\x62ucket\x12\x0c\n\x04left\x18\x01 \x02(\x01\x12\r\n\x05width\x18\x02 \x02(\x01\x12\r\n\x05\x63ount\x18\x03 \x02(\x03\x1a\xc3\x01\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x16\n\x0cscalar_value\x18\x03 \x01(\x02H\x00\x12+\n\x05image\x18\x04 \x01(\x0b\x32\x1a.mindinsight.Summary.ImageH\x00\x12*\n\x06tensor\x18\x08 \x01(\x0b\x32\x18.mindinsight.TensorProtoH\x00\x12\x33\n\thistogram\x18\t \x01(\x0b\x32\x1e.mindinsight.Summary.HistogramH\x00\x42\x07\n\x05value\"\xcb\x06\n\x07\x45xplain\x12\x11\n\tsample_id\x18\x01 \x01(\x05\x12\x12\n\nimage_path\x18\x02 \x01(\t\x12\x1a\n\x12ground_truth_label\x18\x03 \x03(\x05\x12\x31\n\tinference\x18\x04 \x01(\x0b\x32\x1e.mindinsight.Explain.Inference\x12\x35\n\x0b\x65xplanation\x18\x05 \x03(\x0b\x32 .mindinsight.Explain.Explanation\x12\x31\n\tbenchmark\x18\x06 \x03(\x0b\x32\x1e.mindinsight.Explain.Benchmark\x12/\n\x08metadata\x18\x07 \x01(\x0b\x32\x1d.mindinsight.Explain.Metadata\x12\x0e\n\x06status\x18\x08 \x01(\t\x1a\x9c\x02\n\tInference\x12\x19\n\x11ground_truth_prob\x18\x01 \x03(\x02\x12\x17\n\x0fpredicted_label\x18\x02 \x03(\x05\x12\x16\n\x0epredicted_prob\x18\x03 \x03(\x02\x12\x1c\n\x14ground_truth_prob_sd\x18\x04 \x03(\x02\x12#\n\x1bground_truth_prob_itl95_low\x18\x05 \x03(\x02\x12\"\n\x1aground_truth_prob_itl95_hi\x18\x06 \x03(\x02\x12\x19\n\x11predicted_prob_sd\x18\x07 \x03(\x02\x12 \n\x18predicted_prob_itl95_low\x18\x08 \x03(\x02\x12\x1f\n\x17predicted_prob_itl95_hi\x18\t \x03(\x02\x1aJ\n\x0b\x45xplanation\x12\x16\n\x0e\x65xplain_method\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x14\n\x0cheatmap_path\x18\x03 \x01(\t\x1ag\n\tBenchmark\x12\x18\n\x10\x62\x65nchmark_method\x18\x01 \x01(\t\x12\x16\n\x0e\x65xplain_method\x18\x02 \x01(\t\x12\x13\n\x0btotal_score\x18\x03 \x01(\x02\x12\x13\n\x0blabel_score\x18\x04 \x03(\x02\x1aK\n\x08Metadata\x12\r\n\x05label\x18\x01 \x03(\t\x12\x16\n\x0e\x65xplain_method\x18\x02 \x03(\t\x12\x18\n\x10\x62\x65nchmark_method\x18\x03 \x03(\tB\x03\xf8\x01\x01' | |||||
   ,
   dependencies=[mindinsight__anf__ir__pb2.DESCRIPTOR,])
@@ -389,6 +389,48 @@ _EXPLAIN_INFERENCE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='ground_truth_prob_sd', full_name='mindinsight.Explain.Inference.ground_truth_prob_sd', index=3,
+      number=4, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='ground_truth_prob_itl95_low', full_name='mindinsight.Explain.Inference.ground_truth_prob_itl95_low', index=4,
+      number=5, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='ground_truth_prob_itl95_hi', full_name='mindinsight.Explain.Inference.ground_truth_prob_itl95_hi', index=5,
+      number=6, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='predicted_prob_sd', full_name='mindinsight.Explain.Inference.predicted_prob_sd', index=6,
+      number=7, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='predicted_prob_itl95_low', full_name='mindinsight.Explain.Inference.predicted_prob_itl95_low', index=7,
+      number=8, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='predicted_prob_itl95_hi', full_name='mindinsight.Explain.Inference.predicted_prob_itl95_hi', index=8,
+      number=9, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -401,8 +443,8 @@ _EXPLAIN_INFERENCE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1145,
-  serialized_end=1232,
+  serialized_start=1147,
+  serialized_end=1431,
 )
 _EXPLAIN_EXPLANATION = _descriptor.Descriptor(

@@ -427,9 +469,9 @@ _EXPLAIN_EXPLANATION = _descriptor.Descriptor(
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='heatmap', full_name='mindinsight.Explain.Explanation.heatmap', index=2,
-      number=3, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
+      name='heatmap_path', full_name='mindinsight.Explain.Explanation.heatmap_path', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),

@@ -445,8 +487,8 @@ _EXPLAIN_EXPLANATION = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1234,
-  serialized_end=1303,
+  serialized_start=1433,
+  serialized_end=1507,
 )
 _EXPLAIN_BENCHMARK = _descriptor.Descriptor(

@@ -496,8 +538,8 @@ _EXPLAIN_BENCHMARK = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1305,
-  serialized_end=1408,
+  serialized_start=1509,
+  serialized_end=1612,
 )
 _EXPLAIN_METADATA = _descriptor.Descriptor(

@@ -540,8 +582,8 @@ _EXPLAIN_METADATA = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1410,
-  serialized_end=1485,
+  serialized_start=1614,
+  serialized_end=1689,
 )
 _EXPLAIN = _descriptor.Descriptor(
@@ -552,16 +594,16 @@ _EXPLAIN = _descriptor.Descriptor(
   containing_type=None,
   fields=[
     _descriptor.FieldDescriptor(
-      name='image_id', full_name='mindinsight.Explain.image_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
+      name='sample_id', full_name='mindinsight.Explain.sample_id', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='image_data', full_name='mindinsight.Explain.image_data', index=1,
-      number=2, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
+      name='image_path', full_name='mindinsight.Explain.image_path', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),

@@ -620,7 +662,7 @@ _EXPLAIN = _descriptor.Descriptor(
   oneofs=[
   ],
   serialized_start=846,
-  serialized_end=1485,
+  serialized_end=1689,
 )
 _EVENT.fields_by_name['graph_def'].message_type = mindinsight__anf__ir__pb2._GRAPHPROTO
@@ -34,10 +34,9 @@ class DataManagerStatus(BaseEnum):
 class PluginNameEnum(BaseEnum):
     """Plugin Name Enum."""
     EXPLAIN = 'explain'
-    IMAGE_ID = 'image_id'
+    SAMPLE_ID = 'sample_id'
     BENCHMARK = 'benchmark'
     METADATA = 'metadata'
-    IMAGE_DATA = 'image_data'
     GROUND_TRUTH_LABEL = 'ground_truth_label'
     INFERENCE = 'inference'
     EXPLANATION = 'explanation'
@@ -0,0 +1,121 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Datafile encapsulator."""
+
+import os
+import io
+
+from PIL import Image
+from PIL import UnidentifiedImageError
+import numpy as np
+
+from mindinsight.utils.exceptions import UnknownError
+from mindinsight.utils.exceptions import FileSystemPermissionError
+from mindinsight.datavisual.common.exceptions import ImageNotExistError
+from mindinsight.explainer.encapsulator.explain_data_encap import ExplainDataEncap
+
+# Max uint8 value. for converting RGB pixels to [0,1] intensity.
+_UINT8_MAX = 255
+
+# Color of low saliency.
+_SALIENCY_CMAP_LOW = (55, 25, 86, 255)
+# Color of high saliency.
+_SALIENCY_CMAP_HI = (255, 255, 0, 255)
+
+# Channel modes.
+_SINGLE_CHANNEL_MODE = "L"
+_RGBA_MODE = "RGBA"
+_RGB_MODE = "RGB"
+
+_PNG_FORMAT = "PNG"
+
+
+def _clean_train_id_b4_join(train_id):
+    """Clean train_id before joining to a path."""
+    if train_id.startswith("./") or train_id.startswith(".\\"):
+        return train_id[2:]
+    return train_id
+
+
+class DatafileEncap(ExplainDataEncap):
+    """Datafile encapsulator."""
+
+    def query_image_binary(self, train_id, image_path, image_type):
+        """
+        Query image binary content.
+
+        Args:
+            train_id (str): Job ID.
+            image_path (str): Image path relative to explain job's summary directory.
+            image_type (str): Image type, 'original' or 'overlay'.
+
+        Returns:
+            bytes, image binary.
+        """
+        abs_image_path = os.path.join(self.job_manager.summary_base_dir,
+                                      _clean_train_id_b4_join(train_id),
+                                      image_path)
+
+        if self._is_forbidden(abs_image_path):
+            raise FileSystemPermissionError("Forbidden.")
+
+        try:
+            if image_type != "overlay":
+                # no need to convert
+                with open(abs_image_path, "rb") as fp:
+                    return fp.read()
+
+            image = Image.open(abs_image_path)
+            if image.mode == _RGBA_MODE:
+                # It is RGBA already, do not convert.
+                with open(abs_image_path, "rb") as fp:
+                    return fp.read()
+        except FileNotFoundError:
+            raise ImageNotExistError(image_path)
+        except PermissionError:
+            raise FileSystemPermissionError(image_path)
+        except UnidentifiedImageError:
+            raise UnknownError(f"Invalid image file: {image_path}")
+
+        if image.mode == _SINGLE_CHANNEL_MODE:
+            saliency = np.asarray(image)/_UINT8_MAX
+        elif image.mode == _RGB_MODE:
+            saliency = np.asarray(image)
+            saliency = saliency[:, :, 0]/_UINT8_MAX
+        else:
+            raise UnknownError(f"Invalid overlay image mode:{image.mode}.")
+
+        rgba = np.empty((saliency.shape[0], saliency.shape[1], 4))
+        for c in range(3):
+            rgba[:, :, c] = saliency
+        rgba = rgba * _SALIENCY_CMAP_HI + (1-rgba) * _SALIENCY_CMAP_LOW
+        rgba[:, :, 3] = saliency * _UINT8_MAX
+
+        overlay = Image.fromarray(np.uint8(rgba), mode=_RGBA_MODE)
+        buffer = io.BytesIO()
+        overlay.save(buffer, format=_PNG_FORMAT)
+
+        return buffer.getvalue()
+
+    def _is_forbidden(self, path):
+        """Check if the path is outside summary base dir."""
+        base_dir = os.path.realpath(self.job_manager.summary_base_dir)
+        path = os.path.realpath(path)
+        return not path.startswith(base_dir)
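Note: the overlay branch of query_image_binary maps a single-channel saliency image onto the purple-to-yellow colormap and reuses the saliency value as the alpha channel, so low-saliency regions come out transparent. A standalone sketch of the same arithmetic, separate from the patch; the colors are copied from the constants above and the input image is invented:

```python
import numpy as np
from PIL import Image

CMAP_LOW = np.array((55, 25, 86, 255))   # _SALIENCY_CMAP_LOW
CMAP_HI = np.array((255, 255, 0, 255))   # _SALIENCY_CMAP_HI


def saliency_to_rgba(gray_img):
    """Blend CMAP_LOW -> CMAP_HI by saliency; alpha follows saliency."""
    saliency = np.asarray(gray_img.convert("L")) / 255        # [0, 1] intensity
    rgba = np.zeros((*saliency.shape, 4))
    for channel in range(3):
        rgba[:, :, channel] = saliency
    rgba = rgba * CMAP_HI + (1 - rgba) * CMAP_LOW             # linear blend per pixel
    rgba[:, :, 3] = saliency * 255                            # transparent where saliency is low
    return Image.fromarray(np.uint8(rgba), mode="RGBA")


overlay = saliency_to_rgba(Image.new("L", (8, 8), color=128))  # toy 8x8 input
overlay.save("overlay.png", format="PNG")
```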
@@ -17,6 +17,7 @@
 import copy
 from mindinsight.explainer.encapsulator.explain_data_encap import ExplainDataEncap
+from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
 class EvaluationEncap(ExplainDataEncap):

@@ -26,5 +27,5 @@ class EvaluationEncap(ExplainDataEncap):
         """Query evaluation scores."""
         job = self.job_manager.get_job(train_id)
         if job is None:
-            return None
+            raise TrainJobNotExistError(train_id)
         return copy.deepcopy(job.explainer_scores)
@@ -17,8 +17,8 @@
 import copy
 from datetime import datetime
-from mindinsight.utils.exceptions import ParamValueError
 from mindinsight.explainer.encapsulator.explain_data_encap import ExplainDataEncap
+from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
 class ExplainJobEncap(ExplainDataEncap):

@@ -34,7 +34,7 @@ class ExplainJobEncap(ExplainDataEncap):
             offset (int): Page offset.
             limit (int): Max. no. of items to be returned.
         Returns:
-            Tuple[int, List[Dict]], total no. of jobs and job list.
+            tuple[int, list[Dict]], total no. of jobs and job list.
         """
         total, dir_infos = self.job_manager.get_job_list(offset=offset, limit=limit)
         job_infos = [self._dir_2_info(dir_info) for dir_info in dir_infos]

@@ -47,36 +47,13 @@ class ExplainJobEncap(ExplainDataEncap):
         Args:
             train_id (str): Job ID.
         Returns:
-            Dict, the metadata.
+            dict, the metadata.
         """
         job = self.job_manager.get_job(train_id)
         if job is None:
-            return None
+            raise TrainJobNotExistError(train_id)
         return self._job_2_meta(job)
-    def query_image_binary(self, train_id, image_id, image_type):
-        """
-        Query image binary content.
-        Args:
-            train_id (str): Job ID.
-            image_id (str): Image ID.
-            image_type (str): Image type, 'original' or 'overlay'.
-        Returns:
-            bytes, image binary.
-        """
-        job = self.job_manager.get_job(train_id)
-        if job is None:
-            return None
-        if image_type == "original":
-            binary = job.retrieve_image(image_id)
-        elif image_type == "overlay":
-            binary = job.retrieve_overlay(image_id)
-        else:
-            raise ParamValueError(f"image_type:{image_type}")
-        return binary
     @classmethod
     def _dir_2_info(cls, dir_info):
         """Convert ExplainJob object to jsonable info object."""

@@ -111,5 +88,5 @@ class ExplainJobEncap(ExplainDataEncap):
         saliency_info["explainers"] = list(job.explainers)
         saliency_info["metrics"] = list(job.metrics)
         info["saliency"] = saliency_info
-        info["uncertainty"] = {"enabled": False}
+        info["uncertainty"] = {"enabled": job.uncertainty_enabled}
         return info
@@ -16,16 +16,46 @@
 import copy
+from mindinsight.utils.exceptions import ParamValueError
 from mindinsight.explainer.encapsulator.explain_data_encap import ExplainDataEncap

-def _sort_key_confidence(sample):
+def _sort_key_min_confidence(sample):
+    """Samples sort key by the min. confidence."""
+    min_confidence = float("+inf")
+    for inference in sample["inferences"]:
+        if inference["confidence"] < min_confidence:
+            min_confidence = inference["confidence"]
+    return min_confidence
+
+
+def _sort_key_max_confidence(sample):
     """Samples sort key by the max. confidence."""
-    max_confid = None
+    max_confidence = float("-inf")
+    for inference in sample["inferences"]:
+        if inference["confidence"] > max_confidence:
+            max_confidence = inference["confidence"]
+    return max_confidence
+
+
+def _sort_key_min_confidence_sd(sample):
+    """Samples sort key by the min. confidence_sd."""
+    min_confidence_sd = float("+inf")
     for inference in sample["inferences"]:
-        if max_confid is None or inference["confidence"] > max_confid:
-            max_confid = inference["confidence"]
-    return max_confid
+        confidence_sd = inference.get("confidence_sd", float("+inf"))
+        if confidence_sd < min_confidence_sd:
+            min_confidence_sd = confidence_sd
+    return min_confidence_sd
+
+
+def _sort_key_max_confidence_sd(sample):
+    """Samples sort key by the max. confidence_sd."""
+    max_confidence_sd = float("-inf")
+    for inference in sample["inferences"]:
+        confidence_sd = inference.get("confidence_sd", float("-inf"))
+        if confidence_sd > max_confidence_sd:
+            max_confidence_sd = confidence_sd
+    return max_confidence_sd

 class SaliencyEncap(ExplainDataEncap):
@@ -47,15 +77,15 @@ class SaliencyEncap(ExplainDataEncap):
         Query saliency maps.
         Args:
             train_id (str): Job ID.
-            labels (List[str]): Label filter.
-            explainers (List[str]): Explainers of saliency maps to be shown.
+            labels (list[str]): Label filter.
+            explainers (list[str]): Explainers of saliency maps to be shown.
             limit (int): Max. no. of items to be returned.
             offset (int): Page offset.
             sorted_name (str): Field to be sorted.
             sorted_type (str): Sorting order, 'ascending' or 'descending'.
         Returns:
-            Tuple[int, List[dict]], total no. of samples after filtering and
+            tuple[int, list[dict]], total no. of samples after filtering and
             list of sample result.
         """
         job = self.job_manager.get_job(train_id)

@@ -77,7 +107,19 @@ class SaliencyEncap(ExplainDataEncap):
         reverse = sorted_type == "descending"
         if sorted_name == "confidence":
-            samples.sort(key=_sort_key_confidence, reverse=reverse)
+            if reverse:
+                samples.sort(key=_sort_key_max_confidence, reverse=reverse)
+            else:
+                samples.sort(key=_sort_key_min_confidence, reverse=reverse)
+        elif sorted_name == "uncertainty":
+            if not job.uncertainty_enabled:
+                raise ParamValueError("Uncertainty is not enabled, sorted_name cannot be 'uncertainty'")
+            if reverse:
+                samples.sort(key=_sort_key_max_confidence_sd, reverse=reverse)
+            else:
+                samples.sort(key=_sort_key_min_confidence_sd, reverse=reverse)
+        elif sorted_name != "":
+            raise ParamValueError("sorted_name")

         sample_infos = []
         obj_offset = offset*limit
@@ -97,26 +139,23 @@ class SaliencyEncap(ExplainDataEncap):
         Args:
             sample (dict): Sample info.
             job (ExplainJob): Explain job.
-            explainers (List[str]): Explainer names.
+            explainers (list[str]): Explainer names.
         Returns:
-            Dict, the edited sample info.
+            dict, the edited sample info.
         """
-        sample["image"] = self._get_image_url(job.train_id, sample["id"], "original")
+        sample["image"] = self._get_image_url(job.train_id, sample['image'], "original")
         for inference in sample["inferences"]:
             new_list = []
             for saliency_map in inference["saliency_maps"]:
                 if explainers and saliency_map["explainer"] not in explainers:
                     continue
-                saliency_map["overlay"] = self._get_image_url(job.train_id,
-                                                              saliency_map["overlay"],
-                                                              "overlay")
+                saliency_map["overlay"] = self._get_image_url(job.train_id, saliency_map['overlay'], "overlay")
                 new_list.append(saliency_map)
             inference["saliency_maps"] = new_list
         return sample

-    def _get_image_url(self, train_id, image_id, image_type):
+    def _get_image_url(self, train_id, image_path, image_type):
         """Returns image's url."""
         if self._image_url_formatter is None:
-            return image_id
-        return self._image_url_formatter(train_id, image_id, image_type)
+            return image_path
+        return self._image_url_formatter(train_id, image_path, image_type)
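Note: splitting the old _sort_key_confidence into min/max variants means a descending sort ranks samples by their best confidence while an ascending sort ranks them by their worst one, so the extreme the caller asked for always comes first. A toy run with made-up samples, not part of the patch, showing why the key has to change with the direction:

```python
def key_max_confidence(sample):
    return max(inf["confidence"] for inf in sample["inferences"])


def key_min_confidence(sample):
    return min(inf["confidence"] for inf in sample["inferences"])


samples = [
    {"id": "a", "inferences": [{"confidence": 0.2}, {"confidence": 0.9}]},
    {"id": "b", "inferences": [{"confidence": 0.5}, {"confidence": 0.6}]},
]

samples.sort(key=key_max_confidence, reverse=True)
print([s["id"] for s in samples])   # ['a', 'b'] - 'a' wins on its best confidence (0.9)

samples.sort(key=key_min_confidence)
print([s["id"] for s in samples])   # ['a', 'b'] - 'a' comes first on its worst confidence (0.2)

# Sorting ascending with key_max_confidence instead would put 'b' (0.6) before 'a' (0.9),
# which is why the encapsulator now picks the key function from the sort direction.
```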
@@ -21,7 +21,7 @@ from mindinsight.explainer.common.log import logger
 from mindinsight.utils.exceptions import UnknownError
 _IMAGE_DATA_TAGS = {
-    'image_data': PluginNameEnum.IMAGE_DATA.value,
+    'sample_id': PluginNameEnum.SAMPLE_ID.value,
     'ground_truth_label': PluginNameEnum.GROUND_TRUTH_LABEL.value,
     'inference': PluginNameEnum.INFERENCE.value,
     'explanation': PluginNameEnum.EXPLANATION.value

@@ -68,7 +68,7 @@ class EventParser:
     def parse_sample(self, sample: namedtuple) -> Optional[namedtuple]:
         """Parse the sample event."""
-        sample_id = sample.image_id
+        sample_id = sample.sample_id
         if sample_id not in self._sample_pool:
             self._sample_pool[sample_id] = sample
@@ -100,12 +100,12 @@ class EventParser:
         Check whether the image_container is ready for frontend display.
         Args:
-            image_container (nametuple): container consists of sample data
+            image_container (namedtuple): container consists of sample data
         Return:
             bool: whether the image_container is ready for display
         """
-        required_attrs = ['image_id', 'image_data', 'ground_truth_label', 'inference']
+        required_attrs = ['image_path', 'ground_truth_label', 'inference']
         for attr in required_attrs:
             if not EventParser.is_attr_ready(image_container, attr):
                 return False
@@ -117,7 +117,7 @@ class EventParser:
         Check whether the given attribute is ready in image_container.
         Args:
-            image_container (nametuple): container consist of sample data
+            image_container (namedtuple): container consists of sample data
             attr (str): attribute to check
         Returns:
@@ -141,8 +141,17 @@ class EventParser:
     def _parse_inference(self, event, sample_id):
         """Parse the inference event."""
         self._sample_pool[sample_id].inference.ground_truth_prob.extend(event.inference.ground_truth_prob)
+        self._sample_pool[sample_id].inference.ground_truth_prob_sd.extend(event.inference.ground_truth_prob_sd)
+        self._sample_pool[sample_id].inference.ground_truth_prob_itl95_low.\
+            extend(event.inference.ground_truth_prob_itl95_low)
+        self._sample_pool[sample_id].inference.ground_truth_prob_itl95_hi.\
+            extend(event.inference.ground_truth_prob_itl95_hi)
         self._sample_pool[sample_id].inference.predicted_label.extend(event.inference.predicted_label)
         self._sample_pool[sample_id].inference.predicted_prob.extend(event.inference.predicted_prob)
+        self._sample_pool[sample_id].inference.predicted_prob_sd.extend(event.inference.predicted_prob_sd)
+        self._sample_pool[sample_id].inference.predicted_prob_itl95_low.extend(event.inference.predicted_prob_itl95_low)
+        self._sample_pool[sample_id].inference.predicted_prob_itl95_hi.extend(event.inference.predicted_prob_itl95_hi)

     def _parse_explanation(self, event, sample_id):
         """Parse the explanation event."""

@@ -151,7 +160,7 @@ class EventParser:
             new_explanation = self._sample_pool[sample_id].explanation.add()
             new_explanation.explain_method = explanation_item.explain_method
             new_explanation.label = explanation_item.label
-            new_explanation.heatmap = explanation_item.heatmap
+            new_explanation.heatmap_path = explanation_item.heatmap_path

     def _parse_sample_info(self, event, sample_id, tag):
         """Parse the event containing image info."""
@@ -45,6 +45,7 @@ class ExplainJob:
         self._event_parser = EventParser(self)
         self._latest_update_time = latest_update_time
         self._create_time = create_time
+        self._uncertainty_enabled = False
         self._labels = []
         self._metrics = []
         self._explainers = []

@@ -52,8 +53,6 @@ class ExplainJob:
         self._labels_info = {}
         self._explainer_score_dict = defaultdict(list)
         self._label_score_dict = defaultdict(dict)
-        self._overlay_dict = {}
-        self._image_dict = {}

     @property
     def all_classes(self):

@@ -147,6 +146,10 @@ class ExplainJob:
         """
         return None

+    @property
+    def uncertainty_enabled(self):
+        return self._uncertainty_enabled
+
     @property
     def create_time(self):
         """
@@ -220,37 +223,44 @@ class ExplainJob:
             self._labels_info[label_id] = {'label': label,
                                            'sample_ids': set()}

-    def _explanation_to_dict(self, explanation, sample_id):
+    def _explanation_to_dict(self, explanation):
         """Transfer the explanation from event to dict storage."""
-        explainer_name = explanation.explain_method
-        explain_label = explanation.label
-        saliency = explanation.heatmap
-        saliency_id = '{}_{}_{}'.format(
-            sample_id, explain_label, explainer_name)
         explain_info = {
-            'explainer': explainer_name,
-            'overlay': saliency_id,
+            'explainer': explanation.explain_method,
+            'overlay': explanation.heatmap_path,
         }
-        self._overlay_dict[saliency_id] = saliency
         return explain_info

     def _image_container_to_dict(self, sample_data):
         """Transfer the image container to dict storage."""
-        sample_id = sample_data.image_id
+        has_uncertainty = False
+        sample_id = sample_data.sample_id
         sample_info = {
             'id': sample_id,
-            'name': sample_id,
+            'image': sample_data.image_path,
+            'name': str(sample_id),
             'labels': [self._labels_info[x]['label']
                        for x in sample_data.ground_truth_label],
             'inferences': []}
-        self._image_dict[sample_id] = sample_data.image_data
         ground_truth_labels = list(sample_data.ground_truth_label)
         ground_truth_probs = list(sample_data.inference.ground_truth_prob)
         predicted_labels = list(sample_data.inference.predicted_label)
         predicted_probs = list(sample_data.inference.predicted_prob)
+        if sample_data.inference.predicted_prob_sd or sample_data.inference.ground_truth_prob_sd:
+            ground_truth_prob_sds = list(sample_data.inference.ground_truth_prob_sd)
+            ground_truth_prob_lows = list(sample_data.inference.ground_truth_prob_itl95_low)
+            ground_truth_prob_his = list(sample_data.inference.ground_truth_prob_itl95_hi)
+            predicted_prob_sds = list(sample_data.inference.predicted_prob_sd)
+            predicted_prob_lows = list(sample_data.inference.predicted_prob_itl95_low)
+            predicted_prob_his = list(sample_data.inference.predicted_prob_itl95_hi)
+            has_uncertainty = True
+        else:
+            ground_truth_prob_sds = ground_truth_prob_lows = ground_truth_prob_his = None
+            predicted_prob_sds = predicted_prob_lows = predicted_prob_his = None

         inference_info = {}
         for label, prob in zip(
                 ground_truth_labels + predicted_labels,
@@ -260,41 +270,31 @@ class ExplainJob:
                 'confidence': round(prob, _NUM_DIGIT),
                 'saliency_maps': []}

+        if ground_truth_prob_sds or predicted_prob_sds:
+            for label, sd, low, hi in zip(
+                    ground_truth_labels + predicted_labels,
+                    ground_truth_prob_sds + predicted_prob_sds,
+                    ground_truth_prob_lows + predicted_prob_lows,
+                    ground_truth_prob_his + predicted_prob_his):
+                inference_info[label]['confidence_sd'] = sd
+                inference_info[label]['confidence_itl95'] = [low, hi]

         if EventParser.is_attr_ready(sample_data, 'explanation'):
             for explanation in sample_data.explanation:
-                explanation_dict = self._explanation_to_dict(
-                    explanation, sample_id)
+                explanation_dict = self._explanation_to_dict(explanation)
                 inference_info[explanation.label]['saliency_maps'].append(explanation_dict)

         sample_info['inferences'] = list(inference_info.values())
-        return sample_info
+        return sample_info, has_uncertainty

     def _import_sample(self, sample):
         """Add sample object of given sample id."""
         for label_id in sample.ground_truth_label:
-            self._labels_info[label_id]['sample_ids'].add(sample.image_id)
+            self._labels_info[label_id]['sample_ids'].add(sample.sample_id)

-        sample_info = self._image_container_to_dict(sample)
+        sample_info, has_uncertainty = self._image_container_to_dict(sample)
         self._samples_info.update({sample_info['id']: sample_info})

-    def retrieve_image(self, image_id: str):
-        """
-        Retrieve image data from the job given image_id.
-        Return:
-            string, image data in base64 byte
-        """
-        return self._image_dict.get(image_id, None)
-
-    def retrieve_overlay(self, overlay_id: str):
-        """
-        Retrieve sample map from the job given overlay_id.
-        Return:
-            string, saliency_map data in base64 byte
-        """
-        return self._overlay_dict.get(overlay_id, None)
+        self._uncertainty_enabled |= has_uncertainty

     def get_all_samples(self):
         """
@@ -321,7 +321,7 @@ class ExplainJob:
     def _import_data_from_event(self, event):
         """Parse and import data from the event data."""
         tags = {
-            'image_id': PluginNameEnum.IMAGE_ID,
+            'sample_id': PluginNameEnum.SAMPLE_ID,
            'benchmark': PluginNameEnum.BENCHMARK,
            'metadata': PluginNameEnum.METADATA
        }

@@ -332,7 +332,7 @@ class ExplainJob:
            if tag not in event:
                continue
-           if tag == PluginNameEnum.IMAGE_ID.value:
+           if tag == PluginNameEnum.SAMPLE_ID.value:
                sample_event = event[tag]
                sample_data = self._event_parser.parse_sample(sample_event)
                if sample_data is not None:

@@ -385,8 +385,6 @@ class ExplainJob:
        self._labels_info.clear()
        self._explainer_score_dict.clear()
        self._label_score_dict.clear()
-       self._overlay_dict.clear()
-       self._image_dict.clear()
        self._event_parser.clear()

    def _update_benchmark(self, explainer_score_dict, labels_score_dict):
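Note: once a summary carries the new uncertainty fields, _image_container_to_dict attaches confidence_sd and confidence_itl95 next to each confidence and reports has_uncertainty back to _import_sample, which latches job.uncertainty_enabled. A sketch of one resulting sample dict, separate from the patch; values are invented and only keys visible in the hunks above are shown:

```python
sample_info = {
    "id": 42,                              # sample_id from the summary
    "name": "42",                          # str(sample_id)
    "image": "samples/42/original.png",    # image_path, later turned into a URL by SaliencyEncap
    "labels": ["car"],
    "inferences": [
        {
            "confidence": 0.91,
            "confidence_sd": 0.02,             # only present when uncertainty data was recorded
            "confidence_itl95": [0.87, 0.95],  # 95% interval bounds from the summary
            "saliency_maps": [
                {"explainer": "Gradient", "overlay": "samples/42/Gradient_3.png"},
            ],
        },
    ],
}
```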
@@ -47,8 +47,8 @@ class ImageDataContainer:
     """
     def __init__(self, explain_message: Explain):
-        self.image_id = explain_message.image_id
-        self.image_data = explain_message.image_data
+        self.sample_id = explain_message.sample_id
+        self.image_path = explain_message.image_path
         self.ground_truth_label = explain_message.ground_truth_label
         self.inference = explain_message.inference
         self.explanation = explain_message.explanation

@@ -153,7 +153,7 @@ class _ExplainParser(_SummaryParser):
             logger.debug("Deserialize event string completed.")
             fields = {
-                'image_id': PluginNameEnum.IMAGE_ID,
+                'sample_id': PluginNameEnum.SAMPLE_ID,
                 'benchmark': PluginNameEnum.BENCHMARK,
                 'metadata': PluginNameEnum.METADATA
             }

@@ -170,7 +170,7 @@ class _ExplainParser(_SummaryParser):
                 continue
             tensor_value = None
-            if field == PluginNameEnum.IMAGE_ID.value:
+            if field == PluginNameEnum.SAMPLE_ID.value:
                 tensor_value = _ExplainParser._add_image_data(tensor_event_value)
             elif field == PluginNameEnum.BENCHMARK.value:
                 tensor_value = _ExplainParser._add_benchmark(tensor_event_value)

@@ -184,7 +184,7 @@ class _ExplainParser(_SummaryParser):
     @staticmethod
     def _add_image_data(tensor_event_value):
         """
-        Parse image data based on image_id in Explain message
+        Parse image data based on sample_id in Explain message
         Args:
             tensor_event_value: the object of Explain message
@@ -8,6 +8,7 @@ Jinja2>=2.10.1
 MarkupSafe>=1.1.1
 marshmallow>=2.19.2
 numpy>=1.17.0
+pillow>=6.2.0
 protobuf>=3.8.0
 psutil>=5.6.1
 pyyaml>=5.3.1
@@ -20,6 +20,7 @@ from unittest.mock import patch
 from mindinsight.explainer.encapsulator.explain_job_encap import ExplainJobEncap
 from mindinsight.explainer.encapsulator.saliency_encap import SaliencyEncap
 from mindinsight.explainer.encapsulator.evaluation_encap import EvaluationEncap
+from mindinsight.explainer.encapsulator.datafile_encap import DatafileEncap
 from .conftest import EXPLAINER_ROUTES

@@ -182,13 +183,13 @@ class TestExplainerApi:
         expect_result = {"explainer_scores": explainer_scores}
         assert response.get_json() == expect_result

-    @patch.object(ExplainJobEncap, "query_image_binary")
+    @patch.object(DatafileEncap, "query_image_binary")
     def test_query_image(self, mock_query_image_binary, client):
         """Test query an image's binary content."""
         mock_query_image_binary.return_value = b'123'
-        response = client.get(f"{EXPLAINER_ROUTES['image']}?train_id=.%2Fmock_job_1&image_id=1&type=original")
+        response = client.get(f"{EXPLAINER_ROUTES['image']}?train_id=.%2Fmock_job_1&path=1&type=original")
         assert response.status_code == 200
         assert response.data == b'123'
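Note: the encoded train_id=.%2Fmock_job_1 in the test URL is decoded back to './mock_job_1' by the query-string parser before get_train_id sees it, which is what the '/'-encoding change above relies on. A quick standard-library check of the same decoding, independent of the test client:

```python
from urllib.parse import parse_qs

query = "train_id=.%2Fmock_job_1&path=1&type=original"
print(parse_qs(query)["train_id"])   # ['./mock_job_1']
print(parse_qs(query)["path"])       # ['1']
```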
@@ -30,6 +30,7 @@ class MockExplainJob:
         self.min_confidence = 0.5
         self.explainers = ["Gradient"]
         self.metrics = ["Localization"]
+        self.uncertainty_enabled = False
         self.all_classes = [
             {
                 "id": 0,

@@ -77,6 +78,7 @@ class MockExplainJob:
         sample = {
             "id": "123",
             "name": "123",
+            "image": "123",
             "labels": ["car"],
             "inferences": [
                 {
@@ -41,13 +41,3 @@ class TestExplainJobEncap:
         job = self.encapsulator.query_meta("./mock_job_1")
         assert job is not None
         assert job["train_id"] == "./mock_job_1"
-
-    def test_query_image_binary(self):
-        """Test query images' binary content."""
-        image = self.encapsulator.query_image_binary("./mock_job_1", "1", "original")
-        assert image is not None
-        assert image == b'123'
-        image = self.encapsulator.query_image_binary("./mock_job_1", "4", "overlay")
-        assert image is not None
-        assert image == b'456'
@@ -18,9 +18,9 @@ from mindinsight.explainer.encapsulator.saliency_encap import SaliencyEncap
 from .mock_explain_manager import MockExplainManager

-def _image_url_formatter(_, image_id, image_type):
+def _image_url_formatter(_, image_path, image_type):
     """Return image url."""
-    return f"{image_type}-{image_id}"
+    return f"{image_type}-{image_path}"

 class TestEvaluationEncap: