
roipooling_ops.h 3.7 kB

/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GE_OP_ROIPOOLING_OPS_H_
#define GE_OP_ROIPOOLING_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
*@brief Performs Region of Interest (ROI) pooling.
*@par Inputs:
* Three inputs, including:
*@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature map.
*@li rois: A tensor of type float16 or float32, with shape [batch, 5, roi_max_num], describing the ROIs.
*@li roi_actual_num: A tensor of type int32, with shape [batch, 8], specifying the number of ROIs per batch.
*@par Attributes:
*@li roi_max_num: An optional int32, specifying the maximum number of ROIs per batch, at most 6000. Defaults to "3008". The value must be a multiple of 16.
*@li pooled_h: A required int32, specifying the pooled H. Must be greater than 0.
*@li pooled_w: A required int32, specifying the pooled W. Must be greater than 0.
*@li spatial_scale: An optional scaling factor for mapping the input coordinates to the ROI coordinates. Defaults to "0.0625".
*@par Outputs:
*y: An NC1HWC0 tensor of type float16 or float32, describing the result feature map.
*@attention Constraints:\n
*@li For the feature map input: \n
(1) If pooled_h = pooled_w = 2, the feature map size must not exceed 50. \n
(2) If pooled_h = pooled_w = 3, the feature map size must not exceed 60. \n
(3) If pooled_h = pooled_w = 4, the feature map size must not exceed 70. \n
(4) If pooled_h = pooled_w = 5, the feature map size must not exceed 70. \n
(5) If pooled_h = pooled_w = 6, the feature map size must not exceed 80. \n
(6) If pooled_h = pooled_w = 7, the feature map size must not exceed 80. \n
(7) If pooled_h = pooled_w = 8, the feature map size must not exceed 80. \n
(8) If pooled_h = pooled_w = 9, the feature map size must not exceed 70. \n
(9) If pooled_h = pooled_w = 10, the feature map size must not exceed 70. \n
(10) If pooled_h = pooled_w = 11, the feature map size must not exceed 70. \n
(11) If pooled_h = pooled_w = 12, the feature map size must not exceed 70. \n
(12) If pooled_h = pooled_w = 13, the feature map size must not exceed 70. \n
(13) If pooled_h = pooled_w = 14, the feature map size must not exceed 70. \n
(14) If pooled_h = pooled_w = 15, the feature map size must not exceed 70. \n
(15) If pooled_h = pooled_w = 16, the feature map size must not exceed 70. \n
(16) If pooled_h = pooled_w = 17, the feature map size must not exceed 50. \n
(17) If pooled_h = pooled_w = 18, the feature map size must not exceed 40. \n
(18) If pooled_h = pooled_w = 19, the feature map size must not exceed 40. \n
(19) If pooled_h = pooled_w = 20, the feature map size must not exceed 40. \n
*/
REG_OP(RoiPooling)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(rois, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(roi_actual_num, TensorType({DT_INT32}))
    .ATTR(roi_max_num, Int, 3008)
    .REQUIRED_ATTR(pooled_h, Int)
    .REQUIRED_ATTR(pooled_w, Int)
    .ATTR(spatial_scale, Float, 0.0625)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(RoiPooling)
}  // namespace ge

#endif  // GE_OP_ROIPOOLING_OPS_H_
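For reference, the sketch below shows one way the operator registered above might be used when building a GE graph. It is a minimal sketch under assumptions: REG_OP is taken to generate the usual set_input_*/set_attr_* accessors (as for other GE IR operators), op::Data placeholders stand in for the three inputs, the header "all_ops.h" is assumed to expose the generated operator classes, and the helper name BuildRoiPoolingGraph as well as the pooled_h/pooled_w values are illustrative only.

#include <vector>
#include "all_ops.h"       // assumption: exposes the generated op::Data and op::RoiPooling classes
#include "graph/graph.h"

// Illustrative helper: builds a graph containing a single RoiPooling node.
ge::Graph BuildRoiPoolingGraph() {
  // Placeholder operators for the three inputs expected by RoiPooling.
  ge::op::Data x("x");
  ge::op::Data rois("rois");
  ge::op::Data roi_actual_num("roi_actual_num");

  // Wire the inputs and set the attributes declared in the registration above.
  ge::op::RoiPooling roi_pool("roi_pool");
  roi_pool.set_input_x(x)
      .set_input_rois(rois)
      .set_input_roi_actual_num(roi_actual_num)
      .set_attr_pooled_h(7)              // required, must be > 0
      .set_attr_pooled_w(7)              // required, must be > 0
      .set_attr_roi_max_num(3008)        // optional, multiple of 16, at most 6000
      .set_attr_spatial_scale(0.0625f);  // optional, default 0.0625

  // Assemble the graph; its single output y is the pooled feature map.
  ge::Graph graph("roi_pooling_graph");
  std::vector<ge::Operator> inputs{x, rois, roi_actual_num};
  std::vector<ge::Operator> outputs{roi_pool};
  graph.SetInputs(inputs).SetOutputs(outputs);
  return graph;
}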

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.
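As a rough illustration of the GE API half of that description, the sketch below outlines the typical client flow: initialize GE, open a session, register a graph (for example the one built in the sketch above), and run it. This is an assumption-laden outline rather than the definitive calling sequence; the option maps are left empty because the required keys depend on the deployment, the graph id 1 is arbitrary, and real input tensors would have to be provided before RunGraph.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"

int main() {
  std::map<std::string, std::string> options;  // deployment-specific options omitted here
  ge::GEInitialize(options);                   // bring up GE
  ge::Session session(options);                // a session owns and executes graphs

  ge::Graph graph = BuildRoiPoolingGraph();    // illustrative helper from the sketch above
  session.AddGraph(1, graph);                  // register the graph under an arbitrary id

  std::vector<ge::Tensor> inputs;              // would hold the x, rois and roi_actual_num tensors
  std::vector<ge::Tensor> outputs;
  session.RunGraph(1, inputs, outputs);        // GE optimizes the graph and executes it

  ge::GEFinalize();                            // shut GE down
  return 0;
}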