You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

ops_stub.h 16 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MAIN_OPS_STUB_H
  17. #define MAIN_OPS_STUB_H
  18. #include "external/graph/operator_reg.h"
  19. #include "register/op_registry.h"
  20. #include "graph/utils/op_desc_utils.h"
  21. namespace ge {
  22. // for ir
  23. REG_OP(Data)
  24. .INPUT(x, TensorType::ALL())
  25. .OUTPUT(y, TensorType::ALL())
  26. .ATTR(index, Int, 0)
  27. .OP_END_FACTORY_REG(Data)
  28. REG_OP(Variable)
  29. .INPUT(x, TensorType::ALL())
  30. .OUTPUT(y, TensorType::ALL())
  31. .ATTR(index, Int, 0)
  32. .ATTR(value, Tensor, Tensor())
  33. .OP_END_FACTORY_REG(Variable)
  34. REG_OP(Const)
  35. .OUTPUT(y, TensorType::ALL())
  36. .ATTR(value, Tensor, Tensor())
  37. .ATTR(dtype, Int, 0)
  38. .OP_END_FACTORY_REG(Const)
  39. REG_OP(Assign)
  40. .INPUT(resource, TensorType::ALL())
  41. .INPUT(value, TensorType::ALL())
  42. .OUTPUT(y, TensorType::ALL())
  43. .OP_END_FACTORY_REG(Assign) REG_OP(Sqrt)
  44. .INPUT(x, TensorType{(DT_FLOAT.DT_FLOAT16)})
  45. .OUTPUT(y, TensorType{(DT_FLOAT, DT_FLOAT16)})
  46. .ATTR(T, Int, 1)
  47. .ATTR(alpha, Float, 1.0)
  48. .ATTR(beta, Float, 0.0)
  49. .OP_END_FACTORY_REG(Sqrt)
  50. REG_OP(Conv2D)
  51. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
  52. .INPUT(filter, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
  53. .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
  54. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
  55. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
  56. .REQUIRED_ATTR(strides, ListInt)
  57. .REQUIRED_ATTR(pads, ListInt)
  58. .ATTR(dilations, ListInt, {1, 1, 1, 1})
  59. .ATTR(groups, Int, 1)
  60. .ATTR(data_format, String, "NHWC")
  61. .ATTR(offset_x, Int, 0)
  62. .OP_END_FACTORY_REG(Conv2D)
  63. REG_OP(If)
  64. .INPUT(cond, TensorType::ALL())
  65. .DYNAMIC_INPUT(input, TensorType::ALL())
  66. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  67. .GRAPH(then_branch)
  68. .GRAPH(else_branch)
  69. .OP_END_FACTORY_REG(If)
  70. REG_OP(Add)
  71. .INPUT(x1, TensorType({DT_FLOAT, DT_INT32, DT_INT64, DT_FLOAT16, DT_INT16,
  72. DT_INT8, DT_UINT8, DT_DOUBLE, DT_COMPLEX128,
  73. DT_COMPLEX64, DT_STRING}))
  74. .INPUT(x2, TensorType({DT_FLOAT, DT_INT32, DT_INT64, DT_FLOAT16, DT_INT16,
  75. DT_INT8, DT_UINT8, DT_DOUBLE, DT_COMPLEX128,
  76. DT_COMPLEX64, DT_STRING}))
  77. .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT64, DT_FLOAT16, DT_INT16,
  78. DT_INT8, DT_UINT8, DT_DOUBLE, DT_COMPLEX128,
  79. DT_COMPLEX64, DT_STRING}))
  80. .OP_END_FACTORY_REG(Add)
  81. REG_OP(Identity)
  82. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  83. DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  84. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  85. DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  86. .OP_END_FACTORY_REG(Identity)
  87. REG_OP(Abs)
  88. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
  89. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
  90. .OP_END_FACTORY_REG(Abs)
  91. REG_OP(PartitionedCall)
  92. .DYNAMIC_INPUT(args, TensorType::ALL())
  93. .DYNAMIC_OUTPUT(output, TensorType::ALL())
  94. .GRAPH(f)
  95. .ATTR(config, String, "")
  96. .ATTR(config_proto, String, "")
  97. .ATTR(executor_type, String, "")
  98. .OP_END_FACTORY_REG(PartitionedCall)
  99. REG_OP(TensorArray)
  100. .INPUT(size, TensorType({DT_INT32}))
  101. .OUTPUT(handle, TensorType({DT_RESOURCE}))
  102. .OUTPUT(flow, TensorType({DT_FLOAT}))
  103. .REQUIRED_ATTR(dtype, Type)
  104. .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK)
  105. .ATTR(dynamic_size, Bool, false)
  106. .ATTR(clear_after_read, Bool, true)
  107. .ATTR(identical_element_shapes, Bool, false)
  108. .ATTR(tensor_array_name, String, "")
  109. .OP_END_FACTORY_REG(TensorArray)
  110. REG_OP(TensorArrayWrite)
  111. .INPUT(handle, TensorType({DT_RESOURCE}))
  112. .INPUT(index, TensorType({DT_INT32}))
  113. .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8,
  114. DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL,
  115. DT_STRING, DT_COMPLEX64, DT_COMPLEX128}))
  116. .INPUT(flow_in, TensorType({DT_FLOAT}))
  117. .OUTPUT(flow_out, TensorType({DT_FLOAT}))
  118. .OP_END_FACTORY_REG(TensorArrayWrite)
  119. REG_OP(AvgPool3DGrad)
  120. .INPUT(orig_input_shape, TensorType({DT_INT32}))
  121. .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
  122. .OUTPUT(output, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
  123. .REQUIRED_ATTR(ksize, ListInt)
  124. .REQUIRED_ATTR(strides, ListInt)
  125. .REQUIRED_ATTR(pads, ListInt)
  126. .ATTR(ceil_mode, Bool, false)
  127. .ATTR(count_include_pad, Bool, true)
  128. .ATTR(divisor_override, Int, 0)
  129. .ATTR(data_format, String, "NDHWC")
  130. .OP_END_FACTORY_REG(AvgPool3DGrad)
  131. REG_OP(Merge)
  132. .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  133. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  134. DT_UINT64, DT_BOOL}))
  135. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  136. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  137. DT_UINT64, DT_BOOL}))
  138. .OUTPUT(value_index, TensorType({DT_INT32}))
  139. .OP_END_FACTORY_REG(Merge)
  140. REG_OP(NoOp)
  141. .OP_END_FACTORY_REG(NoOp)
  142. REG_OP(VarIsInitializedOp)
  143. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  144. DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  145. .OUTPUT(y, TensorType({DT_BOOL}))
  146. .OP_END_FACTORY_REG(VarIsInitializedOp)
  147. REG_OP(AssignVariableOp)
  148. .INPUT(resource, TensorType({DT_RESOURCE}))
  149. .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  150. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE}))
  151. .REQUIRED_ATTR(dtype, Type)
  152. .OP_END_FACTORY_REG(AssignVariableOp)
  153. REG_OP(ReadVariableOp)
  154. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  155. DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  156. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  157. DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  158. .ATTR(dtype, Int, DT_INT32)
  159. .OP_END_FACTORY_REG(ReadVariableOp)
  160. REG_OP(Reshape)
  161. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
  162. DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  163. .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
  164. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
  165. DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  166. .ATTR(axis, Int, 0)
  167. .ATTR(num_axes, Int, -1)
  168. .OP_END_FACTORY_REG(Reshape)
  169. REG_OP(VarHandleOp)
  170. .ATTR(container, String, "")
  171. .ATTR(shared_name, String, "")
  172. .REQUIRED_ATTR(dtype, Type)
  173. .ATTR(shape, ListInt, ge::UNKNOWN_SHAPE)
  174. .OUTPUT(y, TensorType({DT_RESOURCE}))
  175. .OP_END_FACTORY_REG(VarHandleOp)
  176. REG_OP(Squeeze)
  177. .INPUT(x, TensorType::ALL())
  178. .OUTPUT(y, TensorType::ALL())
  179. .ATTR(axis, ListInt, {})
  180. .OP_END_FACTORY_REG(Squeeze)
  181. REG_OP(Fill)
  182. .INPUT(dims, TensorType::IndexNumberType())
  183. .INPUT(value, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
  184. DT_INT8, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
  185. DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, DT_UINT16,
  186. DT_COMPLEX128, DT_FLOAT16, DT_UINT32, DT_UINT64}))
  187. .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
  188. DT_INT8, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
  189. DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, DT_UINT16,
  190. DT_COMPLEX128, DT_FLOAT16, DT_UINT32, DT_UINT64}))
  191. .OP_END_FACTORY_REG(Fill)
  192. REG_OP(ShapeN)
  193. .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  194. DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  195. .DYNAMIC_OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
  196. .ATTR(dtype, Int, DT_INT32)
  197. .OP_END_FACTORY_REG(ShapeN)
  198. REG_OP(Switch)
  199. .INPUT(data, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  200. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  201. DT_UINT64, DT_BOOL}))
  202. .INPUT(pred, TensorType({DT_BOOL}))
  203. .OUTPUT(output_false, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  204. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  205. DT_UINT64, DT_BOOL}))
  206. .OUTPUT(output_true, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  207. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  208. DT_UINT64, DT_BOOL}))
  209. .OP_END_FACTORY_REG(Switch)
  210. REG_OP(RefSwitch)
  211. .INPUT(data, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  212. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  213. DT_UINT64, DT_BOOL}))
  214. .INPUT(pred, TensorType({DT_BOOL}))
  215. .OUTPUT(output_false, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  216. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  217. DT_UINT64, DT_BOOL}))
  218. .OUTPUT(output_true, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  219. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  220. DT_UINT64, DT_BOOL}))
  221. .OP_END_FACTORY_REG(RefSwitch)
  222. REG_OP(Enter)
  223. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  224. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  225. DT_UINT64, DT_BOOL}))
  226. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
  227. DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
  228. DT_UINT64, DT_BOOL}))
  229. .REQUIRED_ATTR(frame_name, String)
  230. .REQUIRED_ATTR(is_constant, Bool)
  231. .OP_END_FACTORY_REG(Enter)
  232. REG_OP(VariableV2)
  233. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  234. DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  235. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  236. DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  237. .ATTR(index, Int, 0)
  238. .ATTR(value, Tensor, Tensor())
  239. .ATTR(container, String, "")
  240. .ATTR(shared_name, String, "")
  241. .OP_END_FACTORY_REG(VariableV2)
  242. REG_OP(Constant)
  243. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16,
  244. DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  245. .ATTR(value, Tensor, Tensor())
  246. .OP_END_FACTORY_REG(Constant)
  247. REG_OP(Mul)
  248. .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_INT8,
  249. DI_UINT16, DT_INT16, DT_INT32, DT_INT64,
  250. DT_COMPLEX64, DT_COMPLEX128}))
  251. .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_INT8,
  252. DI_UINT16, DT_INT16, DT_INT32, DT_INT64,
  253. DT_COMPLEX64, DT_COMPLEX128}))
  254. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_INT8,
  255. DI_UINT16, DT_INT16, DT_INT32, DT_INT64,
  256. DT_COMPLEX64, DT_COMPLEX128}))
  257. .OP_END_FACTORY_REG(Mul)
  258. // for plugin
  259. static Status ParseParamsStub(const google::protobuf::Message* op_src, ge::Operator& op_dest) {
  260. return SUCCESS;
  261. }
  262. static Status ParseParamByOpFuncStub(const ge::Operator &op_src, ge::Operator& op_dest) {
  263. return SUCCESS;
  264. }
  265. static Status ParseSubgraphPostFnIfStub(const std::string& subgraph_name, const ge::Graph& graph) {
  266. domi::AutoMappingSubgraphIOIndexFunc auto_mapping_subgraph_index_func =
  267. domi::FrameworkRegistry::Instance().GetAutoMappingSubgraphIOIndexFunc(domi::ONNX);
  268. if (auto_mapping_subgraph_index_func == nullptr) {
  269. std::cout<<"auto mapping if subgraph func is nullptr!"<<std::endl;
  270. return FAILED;
  271. }
  272. return auto_mapping_subgraph_index_func(graph,
  273. [&](int data_index, int &parent_index) -> Status {
  274. parent_index = data_index + 1;
  275. return SUCCESS;
  276. },
  277. [&](int output_index, int &parent_index) -> Status {
  278. parent_index = output_index;
  279. return SUCCESS;
  280. });
  281. }
  282. static Status ParseParamsClipV9Stub(const Message* op_src, ge::Operator& op_dest) {
  283. auto opDesc = ge::OpDescUtils::GetOpDescFromOperator(op_dest);
  284. // 1.add dynamic input and out
  285. opDesc->AddDynamicInputDesc("x", 1);
  286. opDesc->AddDynamicOutputDesc("output", 1);
  287. // 2.set original_type
  288. ge::AttrUtils::SetStr(opDesc, "original_type", "ai.onnx::9::Clip");
  289. return SUCCESS;
  290. }
  291. static Status ParseOpToGraphClipV9Stub(const Operator& op, Graph& graph) {
  292. auto data0 = op::Data("data0").set_attr_index(0);
  293. auto abs0 = op::Abs("abs0").set_input_x(data0);
  294. std::vector<Operator> inputs{data0};
  295. std::vector<std::pair<Operator, std::vector<size_t> > > output_indexs;
  296. output_indexs.emplace_back(abs0, vector<std::size_t>{0});
  297. graph.SetInputs(inputs).SetOutputs(output_indexs);
  298. return SUCCESS;
  299. }
  300. // caffe plugin
  301. REGISTER_CUSTOM_OP("Data")
  302. .FrameworkType(domi::CAFFE)
  303. .OriginOpType("Input")
  304. .ParseParamsFn(ParseParamsStub);
  305. REGISTER_CUSTOM_OP("Abs")
  306. .FrameworkType(domi::CAFFE)
  307. .OriginOpType("AbsVal")
  308. .ParseParamsFn(ParseParamsStub);
  309. // onnx plugin
  310. REGISTER_CUSTOM_OP("Conv2D")
  311. .FrameworkType(domi::ONNX)
  312. .OriginOpType("ai.onnx::11::Conv")
  313. .ParseParamsFn(ParseParamsStub);
  314. REGISTER_CUSTOM_OP("If")
  315. .FrameworkType(domi::ONNX)
  316. .OriginOpType({"ai.onnx::9::If",
  317. "ai.onnx::10::If",
  318. "ai.onnx::11::If",
  319. "ai.onnx::12::If",
  320. "ai.onnx::13::If"})
  321. .ParseParamsFn(ParseParamsStub)
  322. .ParseParamsByOperatorFn(ParseParamByOpFuncStub)
  323. .ParseSubgraphPostFn(ParseSubgraphPostFnIfStub);
  324. REGISTER_CUSTOM_OP("Add")
  325. .FrameworkType(domi::ONNX)
  326. .OriginOpType("ai.onnx::11::Add")
  327. .ParseParamsFn(ParseParamsStub);
  328. REGISTER_CUSTOM_OP("Identity")
  329. .FrameworkType(domi::ONNX)
  330. .OriginOpType("ai.onnx::11::Identity")
  331. .ParseParamsFn(ParseParamsStub);
  332. // tf plugin
  333. REGISTER_CUSTOM_OP("Add")
  334. .FrameworkType(domi::TENSORFLOW)
  335. .OriginOpType("Add")
  336. .ParseParamsFn(ParseParamsStub);
  337. REGISTER_CUSTOM_OP("PartitionedCall")
  338. .FrameworkType(domi::ONNX)
  339. .OriginOpType({"ai.onnx::9::Clip"})
  340. .ParseParamsFn(ParseParamsClipV9Stub)
  341. .ParseOpToGraphFn(ParseOpToGraphClipV9Stub);
  342. REGISTER_CUSTOM_OP("TensorArray")
  343. .FrameworkType(domi::TENSORFLOW)
  344. .OriginOpType("TensorArrayV3")
  345. .ParseParamsFn(ParseParamsStub);
  346. REGISTER_CUSTOM_OP("TensorArrayWrite")
  347. .FrameworkType(domi::TENSORFLOW)
  348. .OriginOpType("TensorArrayWriteV3")
  349. .ParseParamsFn(ParseParamsStub);
  350. REGISTER_CUSTOM_OP("DynamicRNN")
  351. .FrameworkType(domi::TENSORFLOW)
  352. .OriginOpType("BlockLSTM")
  353. .ParseParamsFn(ParseParamsStub);
  354. REGISTER_CUSTOM_OP("Merge")
  355. .FrameworkType(domi::TENSORFLOW)
  356. .OriginOpType("HistogramSummary")
  357. .ParseParamsFn(ParseParamsStub);
  358. REGISTER_CUSTOM_OP("NoOp")
  359. .FrameworkType(domi::TENSORFLOW)
  360. .OriginOpType("NoOp")
  361. .ParseParamsFn(ParseParamsStub);
  362. REGISTER_CUSTOM_OP("Fill")
  363. .FrameworkType(domi::TENSORFLOW)
  364. .OriginOpType("Fill")
  365. .ParseParamsFn(ParseParamsStub);
  366. } // namespace ge
  367. #endif // MAIN_OPS_STUB_H