
transformation_ops.h

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_OP_TRANSFORMATION_OPS_H
#define GE_OP_TRANSFORMATION_OPS_H

#include "../graph/operator_reg.h"

namespace ge {
REG_OP(DepthwiseWeight4DTo6D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OP_END_FACTORY_REG(DepthwiseWeight4DTo6D)

REG_OP(DepthwiseWeight6DTo4D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .ATTR(channel_size, Int, 16)
    .OP_END_FACTORY_REG(DepthwiseWeight6DTo4D)
/**
*@brief Permutes the dimensions according to perm.\n
The returned tensor's dimension i will correspond to the input dimension perm[i].
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@par Attributes:
*perm: A permutation of the dimensions of "x".
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(TransposeD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(perm, ListInt)
    .OP_END_FACTORY_REG(TransposeD)
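// Shape example (illustrative): for x of shape [2, 3, 5] and perm = {2, 0, 1}, output
// dimension i takes its size from input dimension perm[i], so y has shape [5, 2, 3].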
/**
*@brief Permutes the dimensions according to perm.\n
The returned tensor's dimension i will correspond to the input dimension perm[i].
*@par Inputs:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@li perm: A Tensor of type int32 or int64. A permutation of the dimensions of "x".
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(Transpose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(perm, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Transpose)
/**
*@brief Permutes the dimensions according to order.\n
The returned tensor's dimension i will correspond to the input dimension order[i].
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32.
*@par Attributes:
*order: A permutation of the dimensions of "x". Supports any axis transformation.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(Permute)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(order, ListInt, {0})
    .OP_END_FACTORY_REG(Permute)
/**
*@brief Flattens the input. Preserves axis 0 and flattens the input tensor along axis 1.
*@par Inputs:
*One input: \n
*x: A multi-dimensional Tensor. Must be one of the following types: \n
int8, uint8, int16, uint16, int32, int64, float16, float32, float64.
*@par Outputs:
*y: A 2D flattened Tensor (preserves axis 0 and flattens the input tensor along axis 1). Must be one of the following data types: int8, uint8, int16, uint16, int32, int64, float16, float32, float64.
*/
REG_OP(Flatten)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                          DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                          DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                           DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                           DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(Flatten)
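// Shape example (illustrative): an input of shape [8, 3, 224, 224] is flattened to
// [8, 150528], i.e. axis 0 is kept and all remaining axes are collapsed into one.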
/**
*@brief Permutes and crops the input tensor.
*@par Inputs:
* Three inputs, including: \n
*@li x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@li block_shape: A 1D list or tuple of int32 or int64.
*@li crops: A 2D list or tuple of int32 or int64. Specifies the amount to crop from start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*/
REG_OP(BatchToSpaceND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BatchToSpaceND)

/**
*@brief Permutes and crops the input tensor.
*@par Inputs:
* One input: \n
*x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@par Attributes:
*@li block_shape: A required 1D list or tuple of int32 or int64.
*@li crops: A required 2D list or tuple of int32 or int64. Specifies the amount to crop from the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*/
REG_OP(BatchToSpaceNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceNDD)
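// Shape example (illustrative, assuming TensorFlow-compatible semantics on the logical
// NHWC layout): with block_shape = [2, 2] and zero crops, an input of shape
// [4*N, H, W, C] is rearranged to [N, 2*H, 2*W, C].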
/**
*@brief Pads and permutes the input tensor.
*@par Inputs:
* Three inputs, including: \n
*@li x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@li block_shape: A 1D list or tuple of int32 or int64.
*@li paddings: A 2D list or tuple of int32 or int64. Specifies the padding for the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*/
REG_OP(SpaceToBatchND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(SpaceToBatchND)

/**
*@brief Pads and permutes the input tensor.
*@par Inputs:
* One input: \n
*x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@par Attributes:
*@li block_shape: A required 1D list or tuple of int32 or int64.
*@li paddings: A required 2D list or tuple of int32 or int64. Specifies the padding for the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*/
REG_OP(SpaceToBatchNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchNDD)
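// Shape example (illustrative, assuming TensorFlow-compatible semantics on the logical
// NHWC layout): with block_shape = [2, 2] and zero paddings, an input of shape
// [N, H, W, C] is rearranged to [4*N, H/2, W/2, C]; H and W must be divisible by 2.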
/**
*@brief Outputs a copy of the input tensor where values from the "height" and "width" dimensions are moved to the "depth" dimension.
*@par Inputs:
*x: An NHWC Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@par Attributes:
*@li block_size: A required int, specifying the input block size.
*@li data_format: An optional string, either "NHWC" or "NCHW". Defaults to "NHWC".
*@par Outputs:
*y: A Tensor. Has the same type as input "x".
*/
REG_OP(SpaceToDepth)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(SpaceToDepth)
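// Shape example (illustrative): with data_format = "NHWC" and block_size = 2, an input
// of shape [N, H, W, C] produces an output of shape [N, H/2, W/2, 4*C]; H and W must
// be divisible by block_size.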
/**
*@brief Rearranges data from depth into blocks of spatial data.
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, double, int32, uint8,
* int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, uint32, uint64
*@par Attributes:
*Two attributes, including:
* @li block_size: An int >= 2, specifying the size of the spatial block.
* @li data_format: An optional string, specifying the data format. Defaults to "NHWC".
*@par Outputs:
*y: A Tensor of the same type as "x".
*/
REG_OP(DepthToSpace)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(DepthToSpace)
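// Shape example (illustrative): with data_format = "NHWC" and block_size = 2, an input
// of shape [N, H, W, C] produces an output of shape [N, 2*H, 2*W, C/4]; C must be
// divisible by block_size * block_size.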
/**
*@brief Permutes data into spatial data blocks and then crops them.
*@par Inputs:
*@li x: A 4D Tensor with format NC1HWC0. \n
*Must be one of the following types: float16, float32
*@li crops: A Tensor of type int32 or int64, specifying the amount to crop. No default value.
*@par Attributes:
*block_size: A required int. No default value.
*@par Outputs:
*y: A 4D Tensor with format NC1HWC0, \n
* of type float16 or float32.
*@attention Constraints:
*@li The size of the first dimension of input "x" must be divisible by (block_size * block_size).
*@li "crops" is a 2D tensor of non-negative integers with shape (2, 2).
*@li block_size >= 2
*/
REG_OP(BatchToSpace)
    .INPUT(x, TensorType::BasicType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(BatchToSpace)
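// Shape example (illustrative, on the logical NHWC layout): with block_size = 2 and
// zero crops, an input of shape [4*N, H, W, C] produces an output of shape
// [N, 2*H, 2*W, C].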
/**
*@brief Rearranges (permutes) the batch data into spatial data blocks, and then crops them.
*@par Inputs:
* One input:
*x: A Tensor of shape [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth].\n
*The batch size of the input tensor must be divisible by (block size * block size).
*@par Attributes:
*@li block_size: Must be one of the following types: `int32`, `int64`.
*@li crops: A Tensor. Must be one of the following types: int32, int64.\n
*A 2D tensor of non-negative integers with shape [2, 2]. It specifies how many\n
*elements are clipped from the intermediate result along the spatial dimensions.
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x".
*@attention Constraints:
*@li The size of the first dimension of input "x" must be divisible by (block_size * block_size).
*@li "crops" is a 2D tensor of non-negative integers with shape (2, 2).
*@li block_size >= 2
*/
REG_OP(BatchToSpaceD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
                          DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
                           DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceD)
/**
*@brief Outputs a copy of the input tensor where values from the "height" and "width" dimensions are padded and rearranged to the "batch" dimension.
*@par Inputs:
*@li x: An NC1HWC0 Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@li paddings: A 2D tensor of type int, specifying the padding for the spatial dimensions.
*@par Attributes:
*block_size: A required int, specifying the input block size.
*@par Outputs:
*y: A Tensor. Has the same type as input "x".
*/
REG_OP(SpaceToBatch)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(SpaceToBatch)

/**
*@brief Outputs a copy of the input tensor where values from the "height" and "width" dimensions are padded and rearranged to the "batch" dimension.
*@par Inputs:
*x: An NC1HWC0 Tensor. Must be one of the following types: float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@par Attributes:
*@li block_size: A required int, specifying the input block size.
*@li paddings: A required 2D list of ints, specifying the padding for the spatial dimensions.
*@par Outputs:
*y: A Tensor. Has the same type as input "x".
*/
REG_OP(SpaceToBatchD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchD)
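// Shape example (illustrative, on the logical NHWC layout): with block_size = 2 and
// zero paddings, an input of shape [N, H, W, C] produces an output of shape
// [4*N, H/2, W/2, C]; H and W must be divisible by block_size after padding.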
/**
* @brief Unpacks the given dimension of a rank-R tensor "x" into rank-(R-1)
* tensors.
* @par Inputs:
* x: A rank-R tensor (R > 0) of type BasicType, with format ND or NC1HWC0.
* @par Attributes:
* @li num: An optional int, specifying the number of tensors to be unpacked to.
* Defaults to "None".
* @li axis: A required int, specifying the axis to unpack along. The value range
* is [-R, R).
* @par Outputs:
* y: The list of Tensor objects unpacked from "x", of type BasicType.
* @attention Constraints:
* @li If "num" is not specified, it is inferred from the shape of "x".
* @li For the ND format, "axis" is in the range [-R, R); for the NC1HWC0 format,
* "axis" must not be 2, 3, -2, or -3.
*/
REG_OP(Unpack)
    .INPUT(x, TensorType::BasicType())
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num, Int)
    .ATTR(axis, Int, 0)
    .OP_END_FACTORY_REG(Unpack)
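// Shape example (illustrative): unpacking x of shape [3, 4, 5] along axis = 0 with
// num = 3 yields three output tensors, each of shape [4, 5].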
/**
* @brief Extracts "patches" from "images" and stacks them in the "depth"
* dimension of the output.
* @par Inputs:
* x: A 4D Tensor with shape [batch, in_rows, in_cols, depth].
* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for each
* dimension of images.
* @li strides: A required list or tuple. How far the centers of two consecutive
* patches are in the images. Must be: [1, stride_rows, stride_cols, 1].
* @li rates: A required list or tuple. Must be: [1, rate_rows, rate_cols, 1]. \n
* This is the input stride, specifying how far two consecutive patch \n
* samples are in the input. Equivalent to extracting patches
* with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *\n
* (rates - 1), followed by subsampling them spatially by a factor of rates. \n
* This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
* @li padding: A required string. The type of padding algorithm to use.
* @par Outputs:
* Output: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows *\n
* ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols\n
* x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols"\n
* are the dimensions of the output patches.
* @attention Constraints:
* "ksizes", "strides" and "rates" are lists of integers.
*/
REG_OP(ExtractImagePatches)
    .INPUT(x, TensorType::REALNUMBERTYPE())
    .OUTPUT(y, TensorType::REALNUMBERTYPE())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractImagePatches)
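// Worked example (illustrative): with ksizes = [1, 3, 3, 1] and rates = [1, 2, 2, 1],
// the effective patch size is 3 + (3 - 1) * (2 - 1) = 5, i.e. 5 x 5 in the input, and
// each output position holds a vector of length 3 * 3 * depth.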
/**
*@brief Fused reshape and transpose.
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@par Attributes:
*@li perm: A permutation of the dimensions of "x".
*@li shape: The shape of the input.
*@li transpose_first: If True, the transpose is performed first; otherwise, the reshape is performed first.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(ConfusionTransposeD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTransposeD)

/**
*@brief Fused reshape and transpose.
*@par Inputs:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@li shape: The shape of the input.
*@par Attributes:
*@li perm: A permutation of the dimensions of "x".
*@li transpose_first: If True, the transpose is performed first; otherwise, the reshape is performed first.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(ConfusionTranspose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTranspose)
/**
*@brief Flattens the input tensor to one-dimensional.
*@par Inputs:
*x: An ND tensor. All data types are supported.
*@par Attributes:
*@li axis: An optional int32, specifying the first axis to flatten. All preceding axes are retained in the output. Defaults to "1".
*@li end_axis: An optional int32, specifying the last axis to flatten. All following axes are retained in the output. Defaults to "-1".
*@par Outputs:
*y: The flattened ND tensor. All data types are supported.
*@attention Constraints:
* "axis" and "end_axis" must be within the dimension range of the input.
*/
REG_OP(FlattenV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                          DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                           DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .ATTR(axis, Int, 1)
    .ATTR(end_axis, Int, -1)
    .OP_END_FACTORY_REG(FlattenV2)
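// Shape example (illustrative): with axis = 1 and end_axis = 2, an input of shape
// [2, 3, 4, 5] is flattened to [2, 12, 5]; axes 1 through 2 are collapsed into one.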
REG_OP(DeConvTrans)
    .INPUT(x, TensorType({DT_INT8}))
    .OUTPUT(y, TensorType({DT_INT8}))
    .OP_END_FACTORY_REG(DeConvTrans)

}  // namespace ge

#endif  // GE_OP_TRANSFORMATION_OPS_H

The Graph Engine (GE) is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and finally outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
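As a minimal sketch of how the operators registered in this header surface through GE API, the following assumes the operator classes generated from the REG_OP macros (ge::op::Data, ge::op::TransposeD with set_input_x and set_attr_perm) and the public graph-construction interface (ge::Graph, SetInputs, SetOutputs); header paths and exact signatures may differ between CANN/GE versions and should be checked against your installation.

#include "graph/graph.h"
#include "all_ops.h"  // generated operator classes such as ge::op::TransposeD (assumed path)

ge::Graph BuildTransposeGraph() {
  // Placeholder input that the caller feeds at run time.
  auto x = ge::op::Data("x");

  // TransposeD takes the permutation as a compile-time attribute (REQUIRED_ATTR perm).
  auto transpose = ge::op::TransposeD("transpose")
                       .set_input_x(x)
                       .set_attr_perm({0, 2, 3, 1});  // e.g. NCHW -> NHWC

  ge::Graph graph("transpose_graph");
  graph.SetInputs({x}).SetOutputs({transpose});
  return graph;  // handed to a GE session, where GE Core optimizes and executes it
}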