You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

functional_ops.h 12 kB

5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_FUNCTIONAL_OPS_H_
  17. #define GE_FUNCTIONAL_OPS_H_
  18. #include "graph/operator_reg.h"
  19. #include "graph/operator.h"
  20. namespace ge {
  21. REG_OP(SymbolicGradient)
  22. .DYNAMIC_INPUT(input, TensorType::ALL())
  23. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  24. .GRAPH(f)
  25. .OP_END_FACTORY_REG(SymbolicGradient)
  26. REG_OP(RemoteCall)
  27. .INPUT(target, DT_STRING)
  28. .DYNAMIC_INPUT(args, TensorType::ALL())
  29. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  30. .GRAPH(f)
  31. .OP_END_FACTORY_REG(RemoteCall)
  32. /**
  33. *@brief Select one of the subgraphs to pass the input tensors and return the output tensors. \n
  34. * If "cond" means True, the selected subgraph is "then_branch". \n
  35. * Otherwise, the selected subgraph is "else_branch".
  36. *@par Inputs:
  37. *@li cond: A Tensor. If "cond" is not a scalar of boolean type, \n
  38. * it will be converted to a boolean according to the following rule: \n
  39. * if "cond" is a numerical scalar, non-zero means True and zero means False; \n
  40. * if "cond" is a string scalar, non-empty means True and empty means False; \n
  41. * if "cond" is not a scalar, non-empty means True and empty means False.
  42. *@li input: The input tensors.
  43. *@par Graphs:
  44. *@li then_branch: A subgraph takes 'input' and returns a list of tensors, \n
  45. * whose types are the same as what else_branch returns.
  46. *@li else_branch: A subgraph takes 'input' and returns a list of tensors, \n
  47. * whose types are the same as what then_branch returns.
  48. *@par Outputs:
  49. *output: The output tensors returned by either then_branch(input) or else_branch(input).
  50. *@par Third-party framework compatibility
  51. *@Compatible with the TensorFlow operator _If.
  52. */
  53. REG_OP(_If)
  54. .INPUT(cond, TensorType::ALL())
  55. .DYNAMIC_INPUT(input, TensorType::ALL())
  56. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  57. .GRAPH(then_branch)
  58. .GRAPH(else_branch)
  59. .OP_END_FACTORY_REG(_If)
  60. /**
  61. *@brief Select one of the subgraphs to pass the input tensors and return the output tensors. \n
  62. * If "cond" means True, the selected subgraph is "then_branch". \n
  63. * Otherwise, the selected subgraph is "else_branch".
  64. *@par Inputs:
  65. *@li cond: A Tensor. If "cond" is not a scalar of boolean type, \n
  66. * it will be converted to a boolean according to the following rule: \n
  67. * if "cond" is a numerical scalar, non-zero means True and zero means False; \n
  68. * if "cond" is a string scalar, non-empty means True and empty means False; \n
  69. * if "cond" is not a scalar, non-empty means True and empty means False.
  70. *@li input: The input tensors.
  71. *@par Graphs:
  72. *@li then_branch: A subgraph takes 'input' and returns a list of tensors, \n
  73. * whose types are the same as what else_branch returns.
  74. *@li else_branch: A subgraph takes 'input' and returns a list of tensors, \n
  75. * whose types are the same as what then_branch returns.
  76. *@par Outputs:
  77. *output: The output tensors returned by either then_branch(input) or else_branch(input).
  78. *@par Third-party framework compatibility
  79. *@Compatible with the TensorFlow operator StatelessIf.
  80. */
  81. REG_OP(StatelessIf)
  82. .INPUT(cond, TensorType::ALL())
  83. .DYNAMIC_INPUT(input, TensorType::ALL())
  84. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  85. .GRAPH(then_branch)
  86. .GRAPH(else_branch)
  87. .OP_END_FACTORY_REG(StatelessIf)
  88. /**
  89. *@brief Select one of the subgraphs to pass the input tensors and return the output tensors. \n
  90. * If "cond" means True, the selected subgraph is "then_branch". \n
  91. * Otherwise, the selected subgraph is "else_branch".
  92. *@par Inputs:
  93. *@li cond: A Tensor. If "cond" is not a scalar of boolean type, \n
  94. * it will be converted to a boolean according to the following rule: \n
  95. * if "cond" is a numerical scalar, non-zero means True and zero means False; \n
  96. * if "cond" is a string scalar, non-empty means True and empty means False; \n
  97. * if "cond" is not a scalar, non-empty means True and empty means False.
  98. *@li input: The input tensors.
  99. *@par Graphs:
  100. *@li then_branch: A subgraph takes 'input' and returns a list of tensors, \n
  101. * whose types are the same as what else_branch returns.
  102. *@li else_branch: A subgraph takes 'input' and returns a list of tensors, \n
  103. * whose types are the same as what then_branch returns.
  104. *@par Outputs:
  105. *output: The output tensors returned by either then_branch(input) or else_branch(input).
  106. *@par Third-party framework compatibility
  107. *@Compatible with the TensorFlow operator If.
  108. */
  109. REG_OP(If)
  110. .INPUT(cond, TensorType::ALL())
  111. .DYNAMIC_INPUT(input, TensorType::ALL())
  112. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  113. .GRAPH(then_branch)
  114. .GRAPH(else_branch)
  115. .OP_END_FACTORY_REG(If)
  116. /**
  117. *@brief Select one of the subgraphs to pass the input tensors and return the output tensors.
  118. *@par Inputs:
  119. *@li branch_index: A int32 scalar which determines the selected subgraph.
  120. *@li input: The input tensors, which will be passed to the subgraph.
  121. *@par Graphs:
  122. *branches: A list of subgraphs, each of which takes 'input' and returns a list of tensors, \n
  123. * whose types are the same as what every other subgraph returns.
  124. *@par Outputs:
  125. *output: The output tensors returned by one of branches.
  126. *@par Third-party framework compatibility
  127. *@Compatible with the TensorFlow operator Case.
  128. */
  129. REG_OP(Case)
  130. .INPUT(branch_index, DT_INT32)
  131. .DYNAMIC_INPUT(input, TensorType::ALL())
  132. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  133. .DYNAMIC_GRAPH(branches)
  134. .OP_END_FACTORY_REG(Case)
  135. /**
  136. *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False.
  137. *@par Inputs:
  138. *input: The input tensors.
  139. *@par Graphs:
  140. *@li cond: A subgraph takes 'input' and returns a tensor. \n
  141. * If the tensor is not a scalar of boolean type, \n
  142. * it will be converted to a boolean according to the following rule: \n
  143. * if it is a numerical scalar, non-zero means True and zero means False; \n
  144. * if it is a string scalar, non-empty means True and empty means False; \n
  145. * if it is not a scalar, non-empty means True and empty means False.
  146. *@li body: A subgraph takes 'input' and returns a another list of tensors.
  147. *@par Attributes:
  148. *parallel_iterations: An optional int, default as 10.
  149. *@par Outputs:
  150. *output: The output tensors returned by "body". Has the same type as "input".
  151. *@par Third-party framework compatibility
  152. *@Compatible with the TensorFlow operator _While.
  153. */
  154. REG_OP(_While)
  155. .DYNAMIC_INPUT(input, TensorType::ALL())
  156. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  157. .GRAPH(cond)
  158. .GRAPH(body)
  159. .OP_END_FACTORY_REG(_While)
  160. /**
  161. *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False.
  162. *@par Inputs:
  163. *input: The input tensors.
  164. *@par Graphs:
  165. *@li cond: A subgraph takes 'input' and returns a tensor. \n
  166. * If the tensor is not a scalar of boolean type, \n
  167. * it will be converted to a boolean according to the following rule: \n
  168. * if it is a numerical scalar, non-zero means True and zero means False; \n
  169. * if it is a string scalar, non-empty means True and empty means False; \n
  170. * if it is not a scalar, non-empty means True and empty means False.
  171. *@li body: A subgraph takes 'input' and returns a another list of tensors.
  172. *@par Attributes:
  173. *parallel_iterations: An optional int, default as 10.
  174. *@par Outputs:
  175. *output: The output tensors returned by "body". Has the same type as "input".
  176. *@par Third-party framework compatibility
  177. *@Compatible with the TensorFlow operator While.
  178. */
  179. REG_OP(While)
  180. .DYNAMIC_INPUT(input, TensorType::ALL())
  181. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  182. .GRAPH(cond)
  183. .GRAPH(body)
  184. .ATTR(parallel_iterations, Int, 10)
  185. .OP_END_FACTORY_REG(While)
  186. /**
  187. *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False.
  188. *@par Inputs:
  189. *input: The input tensors.
  190. *@par Graphs:
  191. *@li cond: A subgraph takes 'input' and returns a tensor. \n
  192. * If the tensor is not a scalar of boolean type, \n
  193. * it will be converted to a boolean according to the following rule: \n
  194. * if it is a numerical scalar, non-zero means True and zero means False; \n
  195. * if it is a string scalar, non-empty means True and empty means False; \n
  196. * if it is not a scalar, non-empty means True and empty means False.
  197. *@li body: A subgraph takes 'input' and returns a another list of tensors.
  198. *@par Attributes:
  199. *parallel_iterations: An optional int, default as 10.
  200. *@par Outputs:
  201. *output: The output tensors returned by "body". Has the same type as "input".
  202. *@par Third-party framework compatibility
  203. *@Compatible with the TensorFlow operator StatelessWhile.
  204. */
  205. REG_OP(StatelessWhile)
  206. .DYNAMIC_INPUT(input, TensorType::ALL())
  207. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  208. .GRAPH(cond)
  209. .GRAPH(body)
  210. .ATTR(parallel_iterations, Int, 10)
  211. .OP_END_FACTORY_REG(StatelessWhile)
  212. /**
  213. *@brief Cyclic execute the "body" subgraph until the first input of For op exceed upper bound.
  214. *@par Inputs:
  215. *@li start: A int32 scalar. The lower bound.
  216. *@li limit: A int32 scalar. The upper bound.
  217. *@li delta: A int32 scalar. The step size.
  218. *@li input: The input tensors, which will be passed to "body".
  219. *@par Graphs:
  220. *body: A subgraph takes 'input' and returns a another list of tensors.
  221. *@par Outputs:
  222. *output: The output tensors returned by "body". Has the same type as "input".
  223. *@par Third-party framework compatibility
  224. *@Compatible with the TensorFlow operator For.
  225. */
  226. REG_OP(For)
  227. .INPUT(start, DT_INT32)
  228. .INPUT(limit, DT_INT32)
  229. .INPUT(delta, DT_INT32)
  230. .DYNAMIC_INPUT(input, TensorType::ALL())
  231. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  232. .GRAPH(body)
  233. .OP_END_FACTORY_REG(For)
  234. /**
  235. *@brief Pass the input tensors to the subgraph "f" and return the output tensors.
  236. *@par Inputs:
  237. *args: The input tensors, which will be passed to "f".
  238. *@par Graphs:
  239. *f: A subgraph takes 'args' and returns a another list of tensors.
  240. *@par Attributes:
  241. *@li config: An optional string, default as "".
  242. *@li config_proto: An optional int, default as "".
  243. *@li executor_type: An optional int, default as "".
  244. *@par Outputs:
  245. *output: The output tensors returned by "f".
  246. *@par Third-party framework compatibility
  247. *@Compatible with the TensorFlow operator PartitionedCall.
  248. */
  249. REG_OP(PartitionedCall)
  250. .DYNAMIC_INPUT(args, TensorType::ALL())
  251. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  252. .GRAPH(f)
  253. .ATTR(config, String, "")
  254. .ATTR(config_proto, String, "")
  255. .ATTR(executor_type, String, "")
  256. .OP_END_FACTORY_REG(PartitionedCall)
  257. /**
  258. *@brief Pass the input tensors to the subgraph "f" and return the output tensors.
  259. *@par Inputs:
  260. *args: The input tensors, which will be passed to "f".
  261. *@par Graphs:
  262. *f: A subgraph takes 'args' and returns a another list of tensors.
  263. *@par Attributes:
  264. *@li config: An optional string, default as "".
  265. *@li config_proto: An optional int, default as "".
  266. *@li executor_type: An optional int, default as "".
  267. *@par Outputs:
  268. *output: The output tensors returned by "f".
  269. *@par Third-party framework compatibility
  270. *@Compatible with the TensorFlow operator StatefulPartitionedCall.
  271. */
  272. REG_OP(StatefulPartitionedCall)
  273. .DYNAMIC_INPUT(args, TensorType::ALL())
  274. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  275. .GRAPH(f)
  276. .ATTR(config, String, "")
  277. .ATTR(config_proto, String, "")
  278. .ATTR(executor_type, String, "")
  279. .OP_END_FACTORY_REG(StatefulPartitionedCall)
  280. REG_OP(FakeParam)
  281. .OUTPUT(output, TensorType::ALL())
  282. .ATTR(shape, ListInt, {})
  283. .OP_END_FACTORY_REG(FakeParam)
  284. } // namespace ge
  285. #endif // GE_FUNCTIONAL_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示