
hierarchical_occlusion_encap.py

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Hierarchical Occlusion encapsulator."""
from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
from mindinsight.explainer.encapsulator.explain_data_encap import ExplanationEncap, ExplanationKeys


class HierarchicalOcclusionEncap(ExplanationEncap):
    """Hierarchical occlusion encapsulator."""

    def query_hierarchical_occlusion(self,
                                     train_id,
                                     labels,
                                     limit,
                                     offset,
                                     sorted_name,
                                     sorted_type,
                                     prediction_types=None,
                                     drop_empty=True,
                                     ):
  30. """
  31. Query hierarchical occlusion results.
  32. Args:
  33. train_id (str): Job ID.
  34. labels (list[str]): Label filter.
  35. limit (int): Maximum number of items to be returned.
  36. offset (int): Page offset.
  37. sorted_name (str): Field to be sorted.
  38. sorted_type (str): Sorting order, 'ascending' or 'descending'.
  39. prediction_types (list[str]): Prediction types filter.
  40. drop_empty (bool): Whether to drop out the data without hoc data. Default: True.
  41. Returns:
  42. tuple[int, list[dict]], total number of samples after filtering and list of sample results.
  43. """
        job = self.job_manager.get_job(train_id)
        if job is None:
            raise TrainJobNotExistError(train_id)

        if drop_empty:
            samples = self._query_samples(job, labels, sorted_name, sorted_type, prediction_types,
                                          drop_type=ExplanationKeys.HOC.value)
        else:
            samples = self._query_samples(job, labels, sorted_name, sorted_type, prediction_types)

        sample_infos = []
        # Page-style pagination: `offset` is a page index, so the slice starts at offset * limit
        # and ends at most `limit` items later, clamped to the number of filtered samples.
        obj_offset = offset * limit
        count = len(samples)
        end = count
        if obj_offset + limit < end:
            end = obj_offset + limit
        for i in range(obj_offset, end):
            sample = samples[i]
            sample_infos.append(self._touch_sample(sample, job, drop_empty))

        return count, sample_infos
    def _touch_sample(self, sample, job, drop_empty):
        """
        Apply final edits to a single sample info.

        Args:
            sample (dict): Sample info.
            job (ExplainManager): Explain job.
            drop_empty (bool): Whether to drop inferences without HOC explanations.

        Returns:
            dict, the edited sample info.
        """
  72. sample["image"] = self._get_image_url(job.train_id, sample["image"], "original")
  73. inferences = sample["inferences"]
  74. i = 0 # init index for while loop
  75. while i < len(inferences):
  76. inference_item = inferences[i]
  77. if drop_empty and not inference_item[ExplanationKeys.HOC.value]:
  78. inferences.pop(i)
  79. continue
  80. new_list = []
  81. for idx, hoc_layer in enumerate(inference_item[ExplanationKeys.HOC.value]):
  82. hoc_layer["outcome"] = self._get_image_url(job.train_id,
  83. f"{sample['id']}_{inference_item['label']}_{idx}.jpg",
  84. "outcome")
  85. new_list.append(hoc_layer)
  86. inference_item[ExplanationKeys.HOC.value] = new_list
  87. i += 1
  88. return sample
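

# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It shows how a
# caller might request one page of HOC results. The constructor arguments, the
# train_id value, and the filter values below are assumptions for illustration,
# not MindInsight's actual wiring; only `query_hierarchical_occlusion` and its
# parameters come from the class above.
# ----------------------------------------------------------------------------
# encap = HierarchicalOcclusionEncap(image_url_formatter, explain_manager)
# count, page = encap.query_hierarchical_occlusion(
#     train_id="./summary_dir",       # hypothetical job ID
#     labels=["cat"],                 # keep only samples with this label
#     limit=10,                       # at most 10 samples per page
#     offset=0,                       # first page
#     sorted_name="confidence",       # assumed sort field
#     sorted_type="descending",
#     prediction_types=["TP"],        # assumed prediction-type filter
#     drop_empty=True,                # skip samples without HOC data
# )
# `count` is the total number of samples after filtering; `page` holds at most
# `limit` samples, each with its "image" and per-layer "outcome" fields
# rewritten to image URLs by _touch_sample.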