
ge_onnx.proto 22 kB

// Copyright (c) ONNX Project Contributors.
// Licensed under the MIT license.

syntax = "proto3";

package ge.onnx;

// Overview
//
// ONNX is an open specification that is comprised of the following components:
//
// 1) A definition of an extensible computation graph model.
// 2) Definitions of standard data types.
// 3) Definitions of built-in operators.
//
// This document describes the syntax of models and their computation graphs,
// as well as the standard data types. Together, they are referred to as the ONNX
// Intermediate Representation, or 'IR' for short.
//
// The normative semantic specification of the ONNX IR is found in docs/IR.md.
// Definitions of the built-in neural network operators may be found in docs/Operators.md.

// Notes
//
// Release
//
// We are still in the very early stage of defining ONNX. The current
// version of ONNX is a starting point. While we are actively working
// towards a complete spec, we would like to get the community involved
// by sharing our working version of ONNX.
//
// Protobuf compatibility
//
// To simplify framework compatibility, ONNX is defined using the subset of protobuf
// that is compatible with both protobuf v2 and v3. This means that we do not use any
// protobuf features that are only available in one of the two versions.
//
// Here are the most notable contortions we have to carry out to work around
// these limitations:
//
// - No 'map' (added in protobuf 3.0). We instead represent mappings as lists
//   of key-value pairs, where order does not matter and duplicates
//   are not allowed.

// Versioning
//
// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
//
// To be compatible with both proto2 and proto3, we will use a version number
// that is not defined by the default value but an explicit enum number.
enum Version {
  // proto3 requires the first enum value to be zero.
  // We add this just to appease the compiler.
  _START_VERSION = 0;
  // The version field is always serialized and we will use it to store the
  // version that the graph is generated from. This helps us set up version
  // control.
  // For the IR, we are using simple numbers starting with 0x00000001,
  // which was the version we published on Oct 10, 2017.
  IR_VERSION_2017_10_10 = 0x0000000000000001;
  // IR_VERSION 2 published on Oct 30, 2017
  // - Added type discriminator to AttributeProto to support proto3 users
  IR_VERSION_2017_10_30 = 0x0000000000000002;
  // IR VERSION 3 published on Nov 3, 2017
  // - For operator versioning:
  //   - Added new message OperatorSetIdProto
  //   - Added opset_import in ModelProto
  // - For vendor extensions, added domain in NodeProto
  IR_VERSION_2017_11_3 = 0x0000000000000003;
  // IR VERSION 4 published on Jan 22, 2019
  // - Relax constraint that initializers should be a subset of graph inputs
  // - Add type BFLOAT16
  IR_VERSION_2019_1_22 = 0x0000000000000004;
  // IR VERSION 5 published on March 18, 2019
  // - Add message TensorAnnotation.
  // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
  IR_VERSION_2019_3_18 = 0x0000000000000005;
  // IR VERSION 6 published on Sep 19, 2019
  // - Add support for sparse tensor constants stored in model.
  //   - Add message SparseTensorProto
  //   - Add sparse initializers
  IR_VERSION = 0x0000000000000006;
}

// Attributes
//
// A named attribute containing either singular float, integer, string, graph,
// and tensor values, or repeated float, integer, string, graph, and tensor values.
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
  // Note: this enum is structurally identical to the OpSchema::AttrType
  // enum defined in schema.h. If you rev one, you likely need to rev the other.
  enum AttributeType {
    UNDEFINED = 0;
    FLOAT = 1;
    INT = 2;
    STRING = 3;
    TENSOR = 4;
    GRAPH = 5;
    SPARSE_TENSOR = 11;

    FLOATS = 6;
    INTS = 7;
    STRINGS = 8;
    TENSORS = 9;
    GRAPHS = 10;
    SPARSE_TENSORS = 12;
  }

  // The name field MUST be present for this version of the IR.
  string name = 1;  // namespace Attribute

  // If ref_attr_name is not empty, ref_attr_name is the attribute name in the parent function.
  // In this case, this AttributeProto does not contain data, and it is a reference
  // to an attribute in the parent scope.
  // NOTE: This should ONLY be used in a function (sub-graph). It is invalid in the main graph.
  string ref_attr_name = 21;

  // A human-readable documentation for this attribute. Markdown is allowed.
  string doc_string = 13;

  // The type field MUST be present for this version of the IR.
  // For 0.0.1 versions of the IR, this field was not defined, and
  // implementations needed to use has_field heuristics to determine
  // which value field was in use. For IR_VERSION 0.0.2 or later, this
  // field MUST be set and match the f|i|s|t|... field in use. This
  // change was made to accommodate proto3 implementations.
  AttributeType type = 20;  // discriminator that indicates which field below is in use

  // Exactly ONE of the following fields must be present for this version of the IR
  float f = 2;        // float
  int64 i = 3;        // int
  bytes s = 4;        // UTF-8 string
  TensorProto t = 5;  // tensor value
  GraphProto g = 6;   // graph
  SparseTensorProto sparse_tensor = 22;  // sparse tensor value
  // Do not use field below, it's deprecated.
  // optional ValueProto v = 12;  // value - subsumes everything but graph

  repeated float floats = 7;          // list of floats
  repeated int64 ints = 8;            // list of ints
  repeated bytes strings = 9;         // list of UTF-8 strings
  repeated TensorProto tensors = 10;  // list of tensors
  repeated GraphProto graphs = 11;    // list of graphs
  repeated SparseTensorProto sparse_tensors = 23;  // list of sparse tensors
}
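
// Illustrative example (not part of the original specification): an AttributeProto
// carrying a single float named "alpha" could look like this in protobuf text format,
// with the type discriminator matching the one value field that is set:
//
//   name: "alpha"
//   type: FLOAT
//   f: 0.01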

// Defines information on value, including the name, the type, and
// the shape of the value.
message ValueInfoProto {
  // This field MUST be present in this version of the IR.
  string name = 1;  // namespace Value
  // This field MUST be present in this version of the IR for
  // inputs and outputs of the top-level graph.
  TypeProto type = 2;
  // A human-readable documentation for this value. Markdown is allowed.
  string doc_string = 3;
}

// Nodes
//
// Computation graphs are made up of a DAG of nodes, which represent what is
// commonly called a "layer" or "pipeline stage" in machine learning frameworks.
//
// For example, it can be a node of type "Conv" that takes in an image, a filter
// tensor and a bias tensor, and produces the convolved output.
message NodeProto {
  repeated string input = 1;   // namespace Value
  repeated string output = 2;  // namespace Value

  // An optional identifier for this node in a graph.
  // This field MAY be absent in this version of the IR.
  string name = 3;  // namespace Node

  // The symbolic identifier of the Operator to execute.
  string op_type = 4;  // namespace Operator
  // The domain of the OperatorSet that specifies the operator named by op_type.
  string domain = 7;  // namespace Domain

  // Additional named attributes.
  repeated AttributeProto attribute = 5;

  // A human-readable documentation for this node. Markdown is allowed.
  string doc_string = 6;
}

// Models
//
// ModelProto is a top-level file/container format for bundling an ML model and
// associating its computation graph with metadata.
//
// The semantics of the model are described by the associated GraphProto.
message ModelProto {
  // The version of the IR this model targets. See Version enum above.
  // This field MUST be present.
  int64 ir_version = 1;

  // The OperatorSets this model relies on.
  // All ModelProtos MUST have at least one entry that
  // specifies which version of the ONNX OperatorSet is
  // being imported.
  //
  // All nodes in the ModelProto's graph will bind against the same-domain/same-op_type
  // operator with the HIGHEST version in the referenced operator sets.
  repeated OperatorSetIdProto opset_import = 8;

  // The name of the framework or tool used to generate this model.
  // This field SHOULD be present to indicate which implementation/tool/framework
  // emitted the model.
  string producer_name = 2;

  // The version of the framework or tool used to generate this model.
  // This field SHOULD be present to indicate which implementation/tool/framework
  // emitted the model.
  string producer_version = 3;

  // Domain name of the model.
  // We use reverse domain names as namespace indicators. For example:
  // `com.facebook.fair` or `com.microsoft.cognitiveservices`
  //
  // Together with `model_version` and GraphProto.name, this forms the unique identity of
  // the graph.
  string domain = 4;

  // The version of the graph encoded. See Version enum above.
  int64 model_version = 5;

  // A human-readable documentation for this model. Markdown is allowed.
  string doc_string = 6;

  // The parameterized graph that is evaluated to execute the model.
  GraphProto graph = 7;

  // Named metadata values; keys should be distinct.
  repeated StringStringEntryProto metadata_props = 14;
};

// StringStringEntryProto follows the pattern for cross-proto-version maps.
// See https://developers.google.com/protocol-buffers/docs/proto3#maps
message StringStringEntryProto {
  string key = 1;
  string value = 2;
};

message TensorAnnotation {
  string tensor_name = 1;
  // <key, value> pairs to annotate the tensor specified by <tensor_name> above.
  // The keys used in the mapping below must be pre-defined in the ONNX spec.
  // For example, for the 8-bit linear quantization case, 'SCALE_TENSOR' and 'ZERO_POINT_TENSOR'
  // will be pre-defined as quantization parameter keys.
  repeated StringStringEntryProto quant_parameter_tensor_names = 2;
}
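
// Illustrative example (not part of the original specification): a TensorAnnotation that
// links tensor 'a' to its quantization parameter tensors, in protobuf text format:
//
//   tensor_name: "a"
//   quant_parameter_tensor_names { key: "SCALE_TENSOR"      value: "a_scale" }
//   quant_parameter_tensor_names { key: "ZERO_POINT_TENSOR" value: "a_zero_point" }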

// Graphs
//
// A graph defines the computational logic of a model and is comprised of a parameterized
// list of nodes that form a directed acyclic graph based on their inputs and outputs.
// This is the equivalent of the "network" or "graph" in many deep learning
// frameworks.
message GraphProto {
  // The nodes in the graph, sorted topologically.
  repeated NodeProto node = 1;

  // The name of the graph.
  string name = 2;  // namespace Graph

  // A list of named tensor values, used to specify constant inputs of the graph.
  // Each TensorProto entry must have a distinct name (within the list) that
  // MAY also appear in the input list.
  repeated TensorProto initializer = 5;

  // Initializers (see above) stored in sparse format.
  repeated SparseTensorProto sparse_initializer = 15;

  // A human-readable documentation for this graph. Markdown is allowed.
  string doc_string = 10;

  // The inputs and outputs of the graph.
  repeated ValueInfoProto input = 11;
  repeated ValueInfoProto output = 12;

  // Information for the values in the graph. The ValueInfoProto.name fields
  // must be distinct. It is optional for a value to appear in the value_info list.
  repeated ValueInfoProto value_info = 13;

  // This field carries information to indicate the mapping between a tensor and its
  // quantization parameter tensors. For example:
  // tensor 'a' may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
  // which means that tensor 'a_scale' and tensor 'a_zero_point' are the scale and zero point of tensor 'a' in the model.
  repeated TensorAnnotation quantization_annotation = 14;

  // DO NOT USE the following fields, they were deprecated from earlier versions.
  // repeated string input = 3;
  // repeated string output = 4;
  // optional int64 ir_version = 6;
  // optional int64 producer_version = 7;
  // optional string producer_tag = 8;
  // optional string domain = 9;
}

// Tensors
//
// A serialized tensor value.
message TensorProto {
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
    FLOAT16 = 10;

    DOUBLE = 11;
    UINT32 = 12;
    UINT64 = 13;
    COMPLEX64 = 14;   // complex with float32 real and imaginary components
    COMPLEX128 = 15;  // complex with float64 real and imaginary components

    // Non-IEEE floating-point format based on IEEE754 single-precision
    // floating-point number truncated to 16 bits.
    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
    BFLOAT16 = 16;

    // Future extensions go here.
  }

  // The shape of the tensor.
  repeated int64 dims = 1;

  // The data type of the tensor.
  // This field MUST have a valid TensorProto.DataType value.
  int32 data_type = 2;

  // For very large tensors, we may want to store them in chunks, in which
  // case the following fields will specify the segment that is stored in
  // the current TensorProto.
  message Segment {
    int64 begin = 1;
    int64 end = 2;
  }
  Segment segment = 3;

  // Tensor content must be organized in row-major order.
  //
  // Depending on the data_type field, exactly one of the fields below with
  // name ending in _data is used to store the elements of the tensor.

  // For float and complex64 values:
  // Complex64 tensors are encoded as a single array of floats,
  // with the real components appearing in odd numbered positions,
  // and the corresponding imaginary component appearing in the
  // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
  // is encoded as [1.0, 2.0, 3.0, 4.0].)
  // When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
  repeated float float_data = 4 [packed = true];

  // For int32, uint8, int8, uint16, int16, bool, and float16 values:
  // float16 values must be bit-wise converted to a uint16_t prior
  // to writing to the buffer.
  // When this field is present, the data_type field MUST be
  // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16.
  repeated int32 int32_data = 5 [packed = true];

  // For strings.
  // Each element of string_data is a UTF-8 encoded Unicode
  // string. No trailing null, no leading BOM. The protobuf "string"
  // scalar type is not used to match ML community conventions.
  // When this field is present, the data_type field MUST be STRING.
  repeated bytes string_data = 6;

  // For int64.
  // When this field is present, the data_type field MUST be INT64.
  repeated int64 int64_data = 7 [packed = true];

  // Optionally, a name for the tensor.
  string name = 8;  // namespace Value

  // A human-readable documentation for this tensor. Markdown is allowed.
  string doc_string = 12;

  // Serializations can either use one of the fields above, or use this
  // raw bytes field. The only exception is the string case, where one is
  // required to store the content in the repeated bytes string_data field.
  //
  // When this raw_data field is used to store the tensor value, elements MUST
  // be stored as fixed-width, little-endian values.
  // Floating-point data types MUST be stored in IEEE 754 format.
  // Complex64 elements must be written as two consecutive FLOAT values, real component first.
  // Complex128 elements must be written as two consecutive DOUBLE values, real component first.
  // Boolean values MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
  //
  // Note: the advantage of the type-specific fields over the raw_data field is
  // that in some cases (e.g. int data) protobuf does a better packing via
  // variable-length storage, which may lead to a smaller binary footprint.
  // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED.
  bytes raw_data = 9;

  // Data can be stored inside the protobuf file using type-specific fields or raw_data.
  // Alternatively, raw bytes data can be stored in an external file, using the external_data field.
  // external_data stores key-value pairs describing the data location. Recognized keys are:
  // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
  //   protobuf model was stored
  // - "offset" (optional) - position of the byte at which the stored data begins. Integer stored as string.
  //   Offset values SHOULD be multiples of 4096 (page size) to enable mmap support.
  // - "length" (optional) - number of bytes containing data. Integer stored as string.
  // - "checksum" (optional) - SHA1 digest of the file specified under the 'location' key.
  repeated StringStringEntryProto external_data = 13;

  // Location of the data for this tensor. MUST be one of:
  // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in a type-specific field.
  // - EXTERNAL - data stored in an external location as described by the external_data field.
  enum DataLocation {
    DEFAULT = 0;
    EXTERNAL = 1;
  }

  // If this value is not set, data is stored in raw_data (if set) otherwise in a type-specific field.
  DataLocation data_location = 14;

  // For double and complex128 values:
  // Complex128 tensors are encoded as a single array of doubles,
  // with the real components appearing in odd numbered positions,
  // and the corresponding imaginary component appearing in the
  // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
  // is encoded as [1.0, 2.0, 3.0, 4.0].)
  // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128.
  repeated double double_data = 10 [packed = true];

  // For uint64 and uint32 values:
  // When this field is present, the data_type field MUST be
  // UINT32 or UINT64.
  repeated uint64 uint64_data = 11 [packed = true];
}
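
// Illustrative example (not part of the original specification): a 2x3 float tensor stored
// with the type-specific float_data field, in protobuf text format
// (data_type 1 corresponds to DataType.FLOAT):
//
//   name: "W"
//   dims: [2, 3]
//   data_type: 1
//   float_data: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]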

// A serialized sparse-tensor value
message SparseTensorProto {
  // The sequence of non-default values is encoded as a tensor of shape [NNZ].
  // The default value is zero for numeric tensors, and empty-string for string tensors.
  TensorProto values = 1;

  // The indices of the non-default values, which may be stored in one of two formats.
  // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
  //     corresponding to the j-th index of the i-th value (in the values tensor).
  // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
  //     must be the linearized-index of the i-th value (in the values tensor).
  //     The linearized-index can be converted into an index tuple (k_1,...,k_rank)
  //     using the shape provided below.
  // The indices must appear in ascending order without duplication.
  // In the first format, the ordering is lexicographic-ordering:
  // e.g., index-value [1,4] must appear before [2,1]
  TensorProto indices = 2;

  // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
  repeated int64 dims = 3;
}
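
// Illustrative example (not part of the original specification): the 2x3 dense tensor
// [[0, 7, 0], [0, 0, 8]] has two non-zero values, so values has shape [2] and indices
// uses the [NNZ, rank] format with shape [2, 2] (data_type 7 corresponds to INT64):
//
//   values  { dims: [2]     data_type: 7  int64_data: [7, 8] }
//   indices { dims: [2, 2]  data_type: 7  int64_data: [0, 1, 1, 2] }
//   dims: [2, 3]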

// Defines a tensor shape. A dimension can be either an integer value
// or a symbolic variable. A symbolic variable represents an unknown
// dimension.
message TensorShapeProto {
  message Dimension {
    oneof value {
      int64 dim_value = 1;
      string dim_param = 2;  // namespace Shape
    };
    // Standard denotation can optionally be used to denote tensor
    // dimensions with standard semantic descriptions to ensure
    // that operations are applied to the correct axis of a tensor.
    // Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition
    // for pre-defined dimension denotations.
    string denotation = 3;
  };
  repeated Dimension dim = 1;
}

// Types
//
// The standard ONNX data types.
message TypeProto {

  message Tensor {
    // This field MUST NOT have the value of UNDEFINED
    // This field MUST have a valid TensorProto.DataType value
    // This field MUST be present for this version of the IR.
    int32 elem_type = 1;
    TensorShapeProto shape = 2;
  }

  // repeated T
  message Sequence {
    // The type and optional shape of each element of the sequence.
    // This field MUST be present for this version of the IR.
    TypeProto elem_type = 1;
  };

  // map<K,V>
  message Map {
    // This field MUST have a valid TensorProto.DataType value
    // This field MUST be present for this version of the IR.
    // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING
    int32 key_type = 1;
    // This field MUST be present for this version of the IR.
    TypeProto value_type = 2;
  };

  oneof value {
    // The type of a tensor.
    Tensor tensor_type = 1;

    // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values
    //       as input and output to graphs and nodes. These types are needed to naturally
    //       support classical ML operators. DNN operators SHOULD restrict their input
    //       and output types to tensors.

    // The type of a sequence.
    Sequence sequence_type = 4;

    // The type of a map.
    Map map_type = 5;
  }

  // An optional denotation can be used to denote the whole
  // type with a standard semantic description as to what is
  // stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition
  // for pre-defined type denotations.
  string denotation = 6;
}
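
// Illustrative example (not part of the original specification): a TypeProto describing
// a float tensor with a symbolic batch dimension and two fixed dimensions, in protobuf
// text format (elem_type 1 corresponds to TensorProto.DataType.FLOAT):
//
//   tensor_type {
//     elem_type: 1
//     shape {
//       dim { dim_param: "batch" }
//       dim { dim_value: 3 }
//       dim { dim_value: 224 }
//     }
//   }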

// Operator Sets
//
// OperatorSets are uniquely identified by a (domain, opset_version) pair.
message OperatorSetIdProto {
  // The domain of the operator set being identified.
  // The empty string ("") or absence of this field implies the operator
  // set that is defined as part of the ONNX specification.
  // This field MUST be present in this version of the IR when referring to any other operator set.
  string domain = 1;

  // The version of the operator set being identified.
  // This field MUST be present in this version of the IR.
  int64 version = 2;
}
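
For reference, the messages above are ordinary protobuf definitions, so they can be manipulated through the classes that protoc generates. The following is a minimal sketch, not taken from the GE code base, that assumes ge_onnx.proto has been compiled for C++ (producing the header ge_onnx.pb.h and the ge::onnx namespace); the opset version, node type, and file name are illustrative values only. It assembles a one-node model and serializes it to disk.

// Minimal sketch: build a one-node ge.onnx model with the protoc-generated C++ classes.
// Assumes `protoc --cpp_out=. ge_onnx.proto` has produced ge_onnx.pb.h / ge_onnx.pb.cc.
#include <fstream>
#include "ge_onnx.pb.h"

int main() {
  ge::onnx::ModelProto model;
  model.set_ir_version(ge::onnx::IR_VERSION);   // IR version from the Version enum above
  model.set_producer_name("example-producer");  // hypothetical producer name

  // Every model must import at least one operator set; version 11 is only an example.
  ge::onnx::OperatorSetIdProto* opset = model.add_opset_import();
  opset->set_domain("");  // "" refers to the default ONNX operator set
  opset->set_version(11);

  // A graph with a single node: Y = Relu(X).
  ge::onnx::GraphProto* graph = model.mutable_graph();
  graph->set_name("relu_graph");

  ge::onnx::ValueInfoProto* x = graph->add_input();
  x->set_name("X");
  ge::onnx::TypeProto::Tensor* x_type = x->mutable_type()->mutable_tensor_type();
  x_type->set_elem_type(ge::onnx::TensorProto::FLOAT);
  x_type->mutable_shape()->add_dim()->set_dim_value(1);
  x_type->mutable_shape()->add_dim()->set_dim_value(16);

  ge::onnx::ValueInfoProto* y = graph->add_output();
  y->set_name("Y");
  y->mutable_type()->mutable_tensor_type()->set_elem_type(ge::onnx::TensorProto::FLOAT);

  ge::onnx::NodeProto* node = graph->add_node();
  node->set_op_type("Relu");
  node->add_input("X");
  node->add_output("Y");

  // Serialize the model in the binary protobuf wire format.
  std::ofstream out("relu.onnx", std::ios::binary);
  return model.SerializeToOstream(&out) ? 0 : 1;
}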

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.