
functional_ops.h 13 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file functional_ops.h
 * \brief
 */
#ifndef GE_FUNCTIONAL_OPS_H_
#define GE_FUNCTIONAL_OPS_H_

#include "graph/operator_reg.h"
#include "graph/operator.h"

namespace ge {
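/*
 * SymbolicGradient is registered without a doc comment. Judging by its name
 * and signature, it appears to mirror the TensorFlow SymbolicGradient
 * operator, which computes the gradient function of subgraph "f" with
 * respect to its inputs (an assumption, not confirmed by this header).
 */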
REG_OP(SymbolicGradient)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .OP_END_FACTORY_REG(SymbolicGradient)
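/*
 * RemoteCall is also undocumented here. It appears to mirror the TensorFlow
 * RemoteCall operator, which runs subgraph "f" on the device named by the
 * string input "target" and returns its outputs (again an assumption based
 * on the TensorFlow operator of the same name).
 */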
REG_OP(RemoteCall)
    .INPUT(target, DT_STRING)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .OP_END_FACTORY_REG(RemoteCall)
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors. \n
 * If "cond" evaluates to True, the selected subgraph is "then_branch". \n
 * Otherwise, the selected subgraph is "else_branch".
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type, \n
 * it will be converted to a boolean according to the following rule: \n
 * if "cond" is a numerical scalar, non-zero means True and zero means False; \n
 * if "cond" is a string scalar, non-empty means True and empty means False; \n
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors.
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors \n
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors \n
 * whose types are the same as what then_branch returns.
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input).
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator _If.
 */
REG_OP(_If)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(_If)
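/*
 * A minimal sketch of the "cond" to boolean conversion rule documented above,
 * written as plain C++ so the three cases are explicit. "TensorView" and
 * "CondToBool" are hypothetical illustration-only names; the rule itself is
 * taken from the comment above. Assumes <string> and <cstddef> are available.
 *
 *   struct TensorView {
 *     bool is_numeric_scalar;  // scalar of a numerical type
 *     double number;           // value when a numerical scalar
 *     bool is_string_scalar;   // scalar of string type
 *     std::string str;         // value when a string scalar
 *     size_t element_count;    // element count when not a scalar
 *   };
 *
 *   bool CondToBool(const TensorView &cond) {
 *     if (cond.is_numeric_scalar) return cond.number != 0.0;  // non-zero means True
 *     if (cond.is_string_scalar) return !cond.str.empty();    // non-empty means True
 *     return cond.element_count != 0;                         // non-empty means True
 *   }
 */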
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors. \n
 * If "cond" evaluates to True, the selected subgraph is "then_branch". \n
 * Otherwise, the selected subgraph is "else_branch".
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type, \n
 * it will be converted to a boolean according to the following rule: \n
 * if "cond" is a numerical scalar, non-zero means True and zero means False; \n
 * if "cond" is a string scalar, non-empty means True and empty means False; \n
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors.
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors \n
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors \n
 * whose types are the same as what then_branch returns.
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input).
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatelessIf.
 */
REG_OP(StatelessIf)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(StatelessIf)
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors. \n
 * If "cond" evaluates to True, the selected subgraph is "then_branch". \n
 * Otherwise, the selected subgraph is "else_branch".
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type, \n
 * it will be converted to a boolean according to the following rule: \n
 * if "cond" is a numerical scalar, non-zero means True and zero means False; \n
 * if "cond" is a string scalar, non-empty means True and empty means False; \n
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors.
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors \n
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors \n
 * whose types are the same as what then_branch returns.
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input).
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator If.
 */
REG_OP(If)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(If)
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors.
 *@par Inputs:
 *@li branch_index: An int32 scalar that determines which subgraph is selected.
 *@li input: The input tensors, which will be passed to the subgraph.
 *@par Graphs:
 *branches: A list of subgraphs, each of which takes 'input' and returns a list of tensors \n
 * whose types are the same as what every other subgraph returns.
 *@par Outputs:
 *output: The output tensors returned by one of the branches.
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator Case.
 */
REG_OP(Case)
    .INPUT(branch_index, DT_INT32)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .DYNAMIC_GRAPH(branches)
    .OP_END_FACTORY_REG(Case)
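/*
 * A minimal sketch of the dispatch behaviour of Case. "Tensors" and
 * "Subgraph" are hypothetical aliases (e.g. Subgraph could be
 * std::function<Tensors(const Tensors &)>); clamping an out-of-range
 * branch_index to the last branch is assumed from the TensorFlow Case
 * semantics this op is declared compatible with.
 *
 *   Tensors RunCase(int32_t branch_index,
 *                   const std::vector<Subgraph> &branches,
 *                   const Tensors &input) {
 *     const int32_t last = static_cast<int32_t>(branches.size()) - 1;
 *     if (branch_index < 0 || branch_index > last) {
 *       branch_index = last;  // assumed: out-of-range index selects the last branch
 *     }
 *     return branches[branch_index](input);  // exactly one branch is executed
 *   }
 */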
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False.
 *@par Inputs:
 *input: The input tensors.
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor. \n
 * If the tensor is not a scalar of boolean type, \n
 * it will be converted to a boolean according to the following rule: \n
 * if it is a numerical scalar, non-zero means True and zero means False; \n
 * if it is a string scalar, non-empty means True and empty means False; \n
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors.
 *@par Outputs:
 *output: The output tensors returned by "body", with the same types as "input".
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator _While.
 */
REG_OP(_While)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .OP_END_FACTORY_REG(_While)
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False.
 *@par Inputs:
 *input: The input tensors.
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor. \n
 * If the tensor is not a scalar of boolean type, \n
 * it will be converted to a boolean according to the following rule: \n
 * if it is a numerical scalar, non-zero means True and zero means False; \n
 * if it is a string scalar, non-empty means True and empty means False; \n
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors.
 *@par Attributes:
 *parallel_iterations: An optional int. Defaults to 10.
 *@par Outputs:
 *output: The output tensors returned by "body", with the same types as "input".
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator While.
 */
REG_OP(While)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .ATTR(parallel_iterations, Int, 10)
    .OP_END_FACTORY_REG(While)
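/*
 * A minimal sketch of the loop semantics shared by _While, While and
 * StatelessWhile. "CondGraph", "BodyGraph" and "Tensors" are hypothetical
 * aliases, and CondToBool is the conversion rule sketched after the If
 * operators above; the loop structure follows the documentation directly.
 *
 *   Tensors RunWhile(const CondGraph &cond, const BodyGraph &body, Tensors vars) {
 *     while (CondToBool(cond(vars))) {  // re-evaluate cond on the carried tensors
 *       vars = body(vars);              // body yields the next loop-carried values
 *     }
 *     return vars;                      // same types as the initial input
 *   }
 */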
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False.
 *@par Inputs:
 *input: The input tensors.
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor. \n
 * If the tensor is not a scalar of boolean type, \n
 * it will be converted to a boolean according to the following rule: \n
 * if it is a numerical scalar, non-zero means True and zero means False; \n
 * if it is a string scalar, non-empty means True and empty means False; \n
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors.
 *@par Attributes:
 *parallel_iterations: An optional int. Defaults to 10.
 *@par Outputs:
 *output: The output tensors returned by "body", with the same types as "input".
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatelessWhile.
 */
REG_OP(StatelessWhile)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .ATTR(parallel_iterations, Int, 10)
    .OP_END_FACTORY_REG(StatelessWhile)
/**
 *@brief Cyclically execute the "body" subgraph until the loop counter, which starts at "start", \n
 * reaches the upper bound "limit".
 *@par Inputs:
 *@li start: An int32 scalar. The lower bound.
 *@li limit: An int32 scalar. The upper bound.
 *@li delta: An int32 scalar. The step size.
 *@li input: The input tensors, which will be passed to "body".
 *@par Graphs:
 *body: A subgraph that takes 'input' and returns another list of tensors.
 *@par Outputs:
 *output: The output tensors returned by "body", with the same types as "input".
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator For.
 */
REG_OP(For)
    .INPUT(start, DT_INT32)
    .INPUT(limit, DT_INT32)
    .INPUT(delta, DT_INT32)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(body)
    .OP_END_FACTORY_REG(For)
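/*
 * A minimal sketch of the iteration pattern of For. Passing the loop counter
 * to "body" ahead of the carried tensors is assumed from the TensorFlow For
 * operator this op is declared compatible with; "BodyWithCounter" and
 * "Tensors" remain hypothetical aliases, and "delta" must be non-zero.
 *
 *   Tensors RunFor(int32_t start, int32_t limit, int32_t delta,
 *                  const BodyWithCounter &body, Tensors vars) {
 *     for (int32_t i = start; (delta > 0) ? (i < limit) : (i > limit); i += delta) {
 *       vars = body(i, vars);  // body sees the counter plus the carried tensors
 *     }
 *     return vars;
 *   }
 */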
/**
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors.
 *@par Inputs:
 *args: The input tensors, which will be passed to "f".
 *@par Graphs:
 *f: A subgraph that takes 'args' and returns another list of tensors.
 *@par Attributes:
 *@li config: An optional string. Defaults to "".
 *@li config_proto: An optional string. Defaults to "".
 *@li executor_type: An optional string. Defaults to "".
 *@par Outputs:
 *output: The output tensors returned by "f".
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator PartitionedCall.
 */
REG_OP(PartitionedCall)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .ATTR(config, String, "")
    .ATTR(config_proto, String, "")
    .ATTR(executor_type, String, "")
    .OP_END_FACTORY_REG(PartitionedCall)
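/*
 * Semantically, PartitionedCall simply evaluates output = f(args). The three
 * string attributes mirror the TensorFlow attributes of the same names,
 * where they carry serialized session configuration ("config",
 * "config_proto") and executor selection ("executor_type"); how GE itself
 * interprets them is not documented in this header.
 */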
/**
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors.
 *@par Inputs:
 *args: The input tensors, which will be passed to "f".
 *@par Graphs:
 *f: A subgraph that takes 'args' and returns another list of tensors.
 *@par Attributes:
 *@li config: An optional string. Defaults to "".
 *@li config_proto: An optional string. Defaults to "".
 *@li executor_type: An optional string. Defaults to "".
 *@par Outputs:
 *output: The output tensors returned by "f".
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatefulPartitionedCall.
 */
REG_OP(StatefulPartitionedCall)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .ATTR(config, String, "")
    .ATTR(config_proto, String, "")
    .ATTR(executor_type, String, "")
    .OP_END_FACTORY_REG(StatefulPartitionedCall)
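/*
 * FakeParam is undocumented here. It appears to mirror the TensorFlow
 * FakeParam operator, a placeholder that stands in for a tensor of the given
 * shape inside branch subgraphs without producing real data (an assumption
 * based on the TensorFlow operator of the same name).
 */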
REG_OP(FakeParam)
    .OUTPUT(output, TensorType::ALL())
    .ATTR(shape, ListInt, {})
    .OP_END_FACTORY_REG(FakeParam)
}  // namespace ge

#endif  // GE_FUNCTIONAL_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph-optimization passes, and finally outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core.