
update ascend software package 16 Aug 21

tags/v1.5.1
yanghaoran, 3 years ago
commit f2d2c36150
100 changed files with 979 additions and 635 deletions
  1. inc/external/acl/OWNERS (+9 -0)
  2. inc/external/acl/acl_prof.h (+0 -37)
  3. inc/external/acl/error_codes/rt_error_codes.h (+0 -3)
  4. inc/framework/common/fmk_error_codes.h (+7 -1)
  5. inc/framework/common/ge_compiler_options.h (+1 -1)
  6. inc/framework/common/l2_cache_optimize.h (+1 -1)
  7. inc/framework/common/profiling/ge_profiling.h (+1 -1)
  8. inc/framework/common/profiling/ge_runner_profiling.h (+1 -1)
  9. inc/framework/common/string_util.h (+7 -5)
  10. inc/framework/common/taskdown_common.h (+1 -1)
  11. inc/framework/memory/memory_api.h (+1 -1)
  12. inc/framework/omg/ge_init.h (+1 -1)
  13. inc/framework/omg/omg.h (+1 -1)
  14. inc/framework/omg/parser/model_parser.h (+8 -4)
  15. inc/framework/omg/parser/op_parser.h (+1 -1)
  16. inc/framework/omg/parser/parser_api.h (+3 -3)
  17. inc/framework/omg/parser/parser_factory.h (+1 -1)
  18. inc/framework/omg/parser/weights_parser.h (+1 -1)
  19. metadef (+1 -1)
  20. third_party/fwkacllib/inc/aicpu/aicpu_schedule/aicpu_op_type_list.h (+60 -60)
  21. third_party/fwkacllib/inc/cce/aicpu_engine.h (+1 -0)
  22. third_party/fwkacllib/inc/external/runtime/rt_error_codes.h (+109 -103)
  23. third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_typedef_win.h (+83 -83)
  24. third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h (+1 -1)
  25. third_party/fwkacllib/inc/ops/aipp.h (+1 -1)
  26. third_party/fwkacllib/inc/ops/all_ops.h (+1 -1)
  27. third_party/fwkacllib/inc/ops/array_ops.h (+1 -33)
  28. third_party/fwkacllib/inc/ops/audio_ops.h (+1 -1)
  29. third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h (+1 -1)
  30. third_party/fwkacllib/inc/ops/batch_ops.h (+1 -1)
  31. third_party/fwkacllib/inc/ops/bitwise_ops.h (+1 -1)
  32. third_party/fwkacllib/inc/ops/boosted_trees_ops.h (+1 -1)
  33. third_party/fwkacllib/inc/ops/candidate_sampling_ops.h (+1 -1)
  34. third_party/fwkacllib/inc/ops/case_condition_ops.h (+53 -0)
  35. third_party/fwkacllib/inc/ops/condtake_ops.h (+1 -1)
  36. third_party/fwkacllib/inc/ops/control_flow_ops.h (+1 -1)
  37. third_party/fwkacllib/inc/ops/coordinates_1d_to_2d_ops.h (+48 -0)
  38. third_party/fwkacllib/inc/ops/correlation.h (+1 -1)
  39. third_party/fwkacllib/inc/ops/ctc_ops.h (+2 -2)
  40. third_party/fwkacllib/inc/ops/data_flow_ops.h (+1 -1)
  41. third_party/fwkacllib/inc/ops/elewise_calculation_ops.h (+1 -1)
  42. third_party/fwkacllib/inc/ops/functional_ops.h (+1 -1)
  43. third_party/fwkacllib/inc/ops/get_data_ops.h (+1 -1)
  44. third_party/fwkacllib/inc/ops/globalavgpool.h (+48 -48)
  45. third_party/fwkacllib/inc/ops/hcom_ops.h (+1 -1)
  46. third_party/fwkacllib/inc/ops/hvd_ops.h (+1 -1)
  47. third_party/fwkacllib/inc/ops/image_ops.h (+1 -1)
  48. third_party/fwkacllib/inc/ops/index_to_addr_ops.h (+63 -0)
  49. third_party/fwkacllib/inc/ops/internal_ops.h (+1 -1)
  50. third_party/fwkacllib/inc/ops/linalg_ops.h (+1 -1)
  51. third_party/fwkacllib/inc/ops/list_ops.h (+1 -1)
  52. third_party/fwkacllib/inc/ops/logging_ops.h (+33 -1)
  53. third_party/fwkacllib/inc/ops/lookup_ops.h (+1 -1)
  54. third_party/fwkacllib/inc/ops/math_ops.h (+1 -1)
  55. third_party/fwkacllib/inc/ops/matrix_calculation_ops.h (+25 -78)
  56. third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h (+1 -1)
  57. third_party/fwkacllib/inc/ops/nn_calculation_ops.h (+47 -46)
  58. third_party/fwkacllib/inc/ops/nn_detect_ops.h (+6 -3)
  59. third_party/fwkacllib/inc/ops/nn_norm_ops.h (+1 -1)
  60. third_party/fwkacllib/inc/ops/nn_ops.h (+1 -1)
  61. third_party/fwkacllib/inc/ops/nn_pooling_ops.h (+27 -1)
  62. third_party/fwkacllib/inc/ops/nn_training_ops.h (+1 -1)
  63. third_party/fwkacllib/inc/ops/no_op.h (+1 -1)
  64. third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h (+17 -1)
  65. third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h (+1 -1)
  66. third_party/fwkacllib/inc/ops/outfeed_ops.h (+1 -1)
  67. third_party/fwkacllib/inc/ops/pad_ops.h (+1 -1)
  68. third_party/fwkacllib/inc/ops/parsing_ops.h (+1 -1)
  69. third_party/fwkacllib/inc/ops/quantize_ops.h (+4 -3)
  70. third_party/fwkacllib/inc/ops/ragged_array_ops.h (+1 -1)
  71. third_party/fwkacllib/inc/ops/ragged_conversion_ops.h (+1 -1)
  72. third_party/fwkacllib/inc/ops/ragged_math_ops.h (+1 -1)
  73. third_party/fwkacllib/inc/ops/random_ops.h (+1 -1)
  74. third_party/fwkacllib/inc/ops/reduce_ops.h (+1 -1)
  75. third_party/fwkacllib/inc/ops/resource_variable_ops.h (+1 -1)
  76. third_party/fwkacllib/inc/ops/rnn.h (+3 -3)
  77. third_party/fwkacllib/inc/ops/rpn_ops.h (+1 -1)
  78. third_party/fwkacllib/inc/ops/save_ops.h (+1 -1)
  79. third_party/fwkacllib/inc/ops/sdca_ops.h (+1 -1)
  80. third_party/fwkacllib/inc/ops/selection_ops.h (+57 -19)
  81. third_party/fwkacllib/inc/ops/set_ops.h (+1 -1)
  82. third_party/fwkacllib/inc/ops/slice_write_ops.h (+50 -0)
  83. third_party/fwkacllib/inc/ops/sparse_ops.h (+1 -1)
  84. third_party/fwkacllib/inc/ops/spectral_ops.h (+1 -1)
  85. third_party/fwkacllib/inc/ops/split_combination_ops.h (+1 -1)
  86. third_party/fwkacllib/inc/ops/state_ops.h (+1 -1)
  87. third_party/fwkacllib/inc/ops/stateful_random_ops.h (+1 -1)
  88. third_party/fwkacllib/inc/ops/stateless_random_ops.h (+1 -1)
  89. third_party/fwkacllib/inc/ops/string_ops.h (+1 -1)
  90. third_party/fwkacllib/inc/ops/swap_co_ops.h (+1 -1)
  91. third_party/fwkacllib/inc/ops/target_crop_and_resize.h (+1 -1)
  92. third_party/fwkacllib/inc/ops/transformation_ops.h (+27 -1)
  93. third_party/fwkacllib/inc/ops/warp_perspective_ops.h (+1 -1)
  94. third_party/fwkacllib/inc/opt_info/opt_info.h (+1 -1)
  95. third_party/fwkacllib/inc/runtime/base.h (+90 -6)
  96. third_party/fwkacllib/inc/runtime/config.h (+5 -5)
  97. third_party/fwkacllib/inc/runtime/context.h (+5 -5)
  98. third_party/fwkacllib/inc/runtime/dev.h (+5 -5)
  99. third_party/fwkacllib/inc/runtime/dvfsprofile.h (+5 -5)
  100. third_party/fwkacllib/inc/runtime/event.h (+5 -5)

inc/external/acl/OWNERS (+9 -0)

@@ -0,0 +1,9 @@
approvers:
- ji_chen
- wqtshg
- zhangfan_hq
reviewers:
- justin_zhao
- zhangyongfeng88
options:
no_parent_owners: true

inc/external/acl/acl_prof.h (+0 -37)

@@ -40,20 +40,13 @@ typedef enum {
ACL_AICORE_MEMORY_BANDWIDTH = 2,
ACL_AICORE_L0B_AND_WIDTH = 3,
ACL_AICORE_RESOURCE_CONFLICT_RATIO = 4,
ACL_AICORE_MEMORY_UB = 5,
ACL_AICORE_NONE = 0xFF
} aclprofAicoreMetrics;

typedef enum {
ACL_STEP_START = 0, // step start
ACL_STEP_END = 1 // step end
} aclprofStepTag;

typedef struct aclprofConfig aclprofConfig;
typedef struct aclprofStopConfig aclprofStopConfig;
typedef struct aclprofAicoreEvents aclprofAicoreEvents;
typedef struct aclprofSubscribeConfig aclprofSubscribeConfig;
typedef struct aclprofStepInfo aclprofStepInfo;

/**
* @ingroup AscendCL
@@ -329,36 +322,6 @@ ACL_FUNC_VISIBILITY uint64_t aclprofGetOpDuration(const void *opInfo, size_t opI
*/
ACL_FUNC_VISIBILITY size_t aclprofGetModelId(const void *opInfo, size_t opInfoLen, uint32_t index);

/**
* @ingroup AscendCL
* @brief
*
* @param stepInfo [IN] pointer to stepInfo data
* @param aclprofstepTag [IN] start or end flag
* @param stream [IN] steam info
*
* @retval 0 for failed
*/
ACL_FUNC_VISIBILITY aclError aclprofGetStepTimestamp(aclprofStepInfo *stepInfo, aclprofStepTag tag, aclrtStream stream);

/**
* @ingroup AscendCL
* @brief create pointer to aclprofStepInfo data
*
*
* @retval aclprofStepInfo pointer
*/
ACL_FUNC_VISIBILITY aclprofStepInfo *aclprofCreateStepInfo();

/**
* @ingroup AscendCL
* @brief destroy aclprofStepInfo pointer
*
*
* @retval void
*/
ACL_FUNC_VISIBILITY void aclprofDestroyStepInfo(aclprofStepInfo *stepinfo);

#ifdef __cplusplus
}
#endif


inc/external/acl/error_codes/rt_error_codes.h (+0 -3)

@@ -44,7 +44,6 @@ static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015; // callbac
static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016; // invalid memory type
static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017; // invalid handle
static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018; // invalid malloc type
static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019; // wait timeout

static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000; // feature not support
static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error
@@ -57,7 +56,6 @@ static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no event res
static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource
static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource
static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource
static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource

static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error
static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internel error
@@ -96,7 +94,6 @@ static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // devic
static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout
static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception
static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception
static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal

static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error
static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error


inc/framework/common/fmk_error_codes.h (+7 -1)

@@ -20,14 +20,18 @@
#if defined(_MSC_VER)
#ifdef FUNC_VISIBILITY
#define GE_FUNC_VISIBILITY _declspec(dllexport)
#define GE_OBJECT_VISIBILITY
#else
#define GE_FUNC_VISIBILITY
#define GE_OBJECT_VISIBILITY
#endif
#else
#ifdef FUNC_VISIBILITY
#define GE_FUNC_VISIBILITY __attribute__((visibility("default")))
#define GE_OBJECT_VISIBILITY
#else
#define GE_FUNC_VISIBILITY
#define GE_OBJECT_VISIBILITY __attribute__((visibility("hidden")))
#endif
#endif

@@ -70,7 +74,9 @@ class GE_FUNC_VISIBILITY StatusFactory {

class GE_FUNC_VISIBILITY ErrorNoRegisterar {
public:
ErrorNoRegisterar(uint32_t err, const std::string &desc) { StatusFactory::Instance()->RegisterErrorNo(err, desc); }
ErrorNoRegisterar(uint32_t err, const std::string &desc) {
StatusFactory::Instance()->RegisterErrorNo(err, desc);
}
~ErrorNoRegisterar() {}
};
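
For context on the reformatted registrar: ErrorNoRegisterar exists so that an error number can be attached to a description in the StatusFactory singleton during static initialization. A minimal sketch of that pattern, assuming the class is visible in the current namespace; the numeric value and text below are invented for illustration, not codes defined by GraphEngine.

// Illustration only: the error number 0x0F000001U and its description are made up.
namespace {
const ErrorNoRegisterar g_example_error_reg(0x0F000001U, "example parser failure");
}  // namespace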



inc/framework/common/ge_compiler_options.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/common/l2_cache_optimize.h (+1 -1)

@@ -104,7 +104,7 @@ class GE_FUNC_VISIBILITY L2CacheOptimize {

// maximum common divisor
uint32_t Measure(uint32_t x, uint32_t y) {
if (x == 0 || y == 0) return RC_VALUE_DEFAULT;
if ((x == 0) || (y == 0)) return RC_VALUE_DEFAULT;
uint32_t z = y;
while (x % y != 0) {
z = x % y;
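
The Measure routine touched here is Euclid's greatest-common-divisor loop; the diff only adds parentheses around the zero check. The hunk truncates before the loop body completes, so the remainder-swap below is the standard completion of that idiom, written as a free function rather than the class member.

#include <cstdint>

// Euclid's algorithm, following the loop shape used by L2CacheOptimize::Measure.
// Returns 0 when either input is 0; the class version returns RC_VALUE_DEFAULT instead.
uint32_t Gcd(uint32_t x, uint32_t y) {
  if ((x == 0) || (y == 0)) {
    return 0;
  }
  uint32_t z = y;
  while (x % y != 0) {
    z = x % y;  // remainder becomes the next divisor
    x = y;
    y = z;
  }
  return z;
}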


inc/framework/common/profiling/ge_profiling.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/common/profiling/ge_runner_profiling.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/common/string_util.h (+7 -5)

@@ -45,16 +45,16 @@ class GE_FUNC_VISIBILITY StringUtils {
public:
static std::string &Ltrim(std::string &s) {
#if __cplusplus >= 201103L
(void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return !std::isspace(c); }));
(void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return std::isspace(c) == 0; }));
#else
(void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
#endif
return s;
}
// lint -esym(551,*)
static std::string &Rtrim(std::string &s) { /*lint !e618*/
static std::string &Rtrim(std::string &s) { /*lint !e618*/
#if __cplusplus >= 201103L
(void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return !std::isspace(c); }));
(void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return std::isspace(c) == 0; }));
#else
(void)s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
#endif
@@ -67,7 +67,9 @@ class GE_FUNC_VISIBILITY StringUtils {
/// @param [in] string to be trimmed
/// @return string after trim
///
static std::string &Trim(std::string &s) { return Ltrim(Rtrim(s)); }
static std::string &Trim(std::string &s) {
return Ltrim(Rtrim(s));
}

///
/// @ingroup domi_common
@@ -92,7 +94,7 @@ class GE_FUNC_VISIBILITY StringUtils {
}

auto str_size = str.size();
if (str_size > 0 && str[str_size - 1] == delim) {
if ((str_size > 0) && (str[str_size - 1] == delim)) {
elems.emplace_back("");
}
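
The trim changes above only rewrite the lambda from !std::isspace(c) to std::isspace(c) == 0 (a lint-style preference); behaviour is unchanged. For readers unfamiliar with the find_if pattern, here is a self-contained sketch of the same left/right trim idiom as free functions, not the StringUtils members.

#include <algorithm>
#include <cctype>
#include <string>

// Remove leading whitespace: erase up to the first non-space character.
std::string &LtrimExample(std::string &s) {
  (void)s.erase(s.begin(),
                std::find_if(s.begin(), s.end(), [](int c) { return std::isspace(c) == 0; }));
  return s;
}

// Remove trailing whitespace: search from the back, then erase to the end.
std::string &RtrimExample(std::string &s) {
  (void)s.erase(std::find_if(s.rbegin(), s.rend(),
                             [](int c) { return std::isspace(c) == 0; }).base(),
                s.end());
  return s;
}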



inc/framework/common/taskdown_common.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/memory/memory_api.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/omg/ge_init.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/omg/omg.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/omg/parser/model_parser.h (+8 -4)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,7 +36,7 @@ using Status = domi::Status;

namespace domi {
using GetGraphCallback = std::function<std::unique_ptr<google::protobuf::Message>(
const google::protobuf::Message *root_proto, const std::string &graph)>;
const google::protobuf::Message *root_proto, const std::string &graph)>;

using GetGraphCallbackV2 = std::function<std::string(const std::string &subgraph_name)>;

@@ -109,7 +109,9 @@ class GE_FUNC_VISIBILITY ModelParser {
* @return SUCCESS
* @return Others failed
*/
virtual Status ToJson(const char *model_file, const char *json_file) { return domi::SUCCESS; }
virtual Status ToJson(const char *model_file, const char *json_file) {
return domi::SUCCESS;
}

/*
* @ingroup domi_omg
@@ -129,7 +131,9 @@ class GE_FUNC_VISIBILITY ModelParser {
* @return SUCCESS
* @return Others failed
*/
virtual Status ParseProto(const std::string &serialized_proto, ge::ComputeGraphPtr &graph) { return UNSUPPORTED; }
virtual Status ParseProto(const std::string &serialized_proto, ge::ComputeGraphPtr &graph) {
return UNSUPPORTED;
}

/**
* @ingroup domi_omg


inc/framework/omg/parser/op_parser.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/omg/parser/parser_api.h (+3 -3)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,8 +24,8 @@

namespace ge {
// Initialize parser
GE_FUNC_VISIBILITY Status ParserInitialize(const std::map<std::string, std::string>& options);
GE_FUNC_VISIBILITY Status ParserInitialize(const std::map<std::string, std::string> &options);
// Finalize parser, release all resources
GE_FUNC_VISIBILITY Status ParserFinalize();
} // namespace ge
#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_
#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_

inc/framework/omg/parser/parser_factory.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


inc/framework/omg/parser/weights_parser.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


metadef (+1 -1)

@@ -1 +1 @@
Subproject commit e7b48d87d861921299e7c0fe293aaeac61136900
Subproject commit 51418f61f26599c85bee2b57328afbbf1c9927c7

third_party/fwkacllib/inc/aicpu/aicpu_schedule/aicpu_op_type_list.h (+60 -60)

@@ -1,60 +1,60 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef AICPU_OP_TYPE_LIST_H_
#define AICPU_OP_TYPE_LIST_H_
enum OpKernelType {
TF_KERNEL,
CPU_KERNEL
};
enum ReturnCode {
OP_TYPE_NOT_SUPPORT,
FORMAT_NOT_SUPPORT,
DTYPE_NOT_SUPPORT
};
#pragma pack(push, 1)
//One byte alignment
struct SysOpInfo {
uint64_t opLen;
uint64_t opType;
OpKernelType kernelsType;
};
struct OpParamInfo {
uint64_t num;
uint64_t dtypeList;
uint64_t formatList;
};
struct SysOpCheckInfo {
uint64_t opListNum;
uint64_t offSetLen;
uint64_t sysOpInfoList;
uint64_t opParamInfoList;
};
struct SysOpCheckResp {
uint64_t opListNum;
bool isWithoutJson;
uint64_t returnCodeList;
uint64_t sysOpInfoList;
uint64_t opParamInfoList;
};
#pragma pack(pop)
#endif // AICPU_OP_TYPE_LIST_H_
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef AICPU_OP_TYPE_LIST_H_
#define AICPU_OP_TYPE_LIST_H_
enum OpKernelType {
TF_KERNEL,
CPU_KERNEL
};
enum ReturnCode {
OP_TYPE_NOT_SUPPORT,
FORMAT_NOT_SUPPORT,
DTYPE_NOT_SUPPORT
};
#pragma pack(push, 1)
//One byte alignment
struct SysOpInfo {
uint64_t opLen;
uint64_t opType;
OpKernelType kernelsType;
};
struct OpParamInfo {
uint64_t num;
uint64_t dtypeList;
uint64_t formatList;
};
struct SysOpCheckInfo {
uint64_t opListNum;
uint64_t offSetLen;
uint64_t sysOpInfoList;
uint64_t opParamInfoList;
};
struct SysOpCheckResp {
uint64_t opListNum;
bool isWithoutJson;
uint64_t returnCodeList;
uint64_t sysOpInfoList;
uint64_t opParamInfoList;
};
#pragma pack(pop)
#endif // AICPU_OP_TYPE_LIST_H_

third_party/fwkacllib/inc/cce/aicpu_engine.h (+1 -0)

@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef AICPU_ENGINE_H__
#define AICPU_ENGINE_H__



third_party/fwkacllib/inc/external/runtime/rt_error_codes.h (+109 -103)

@@ -1,103 +1,109 @@
/**
* @file rt_error_codes.h
*
* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__
#define __INC_EXTERNEL_RT_ERROR_CODES_H__
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
static const int32_t ACL_RT_SUCCESS = 0; // success
static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000; // param invalid
static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001; // invalid device id
static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002; // current context null
static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003; // stream not in current context
static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004; // model not in current context
static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005; // stream not in model
static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006; // event timestamp invalid
static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007; // event timestamp reversal
static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008; // memory address unaligned
static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009; // open file failed
static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010; // write file failed
static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011; // error subscribe stream
static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012; // error subscribe thread
static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013; // group not set
static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014; // group not create
static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015; // callback not register to stream
static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016; // invalid memory type
static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017; // invalid handle
static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018; // invalid malloc type
static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019; // wait timeout
static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000; // feature not support
static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error
static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002; // memory free error
static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003; // aicore over flow
static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004; // no device
static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005; // resource alloc fail
static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006; // no permission
static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no event resource
static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource
static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource
static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource
static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource
static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error
static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internel error
static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002; // task full in stream
static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003; // task empty in stream
static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004; // stream not complete
static const int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005; // end of sequence
static const int32_t ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006; // event not complete
static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007; // context release error
static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008; // soc version error
static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009; // task type not support
static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010; // ts lost heartbeat
static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011; // model execute failed
static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012; // report timeout
static const int32_t ACL_ERROR_RT_SYS_DMA = 507013; // sys dma error
static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014; // aicore timeout
static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015; // aicore exception
static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016; // aicore trap exception
static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017; // aicpu timeout
static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018; // aicpu exception
static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019; // aicpu datadump response error
static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020; // aicpu model operate response error
static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021; // profiling error
static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022; // ipc error
static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023; // model abort normal
static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024; // kernel unregistering
static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025; // ringbuffer not init
static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026; // ringbuffer no data
static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027; // kernel lookup error
static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028; // kernel register duplicate
static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029; // debug register failed
static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug unregister failed
static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context
static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out
static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error
static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout
static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception
static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception
static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal
static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error
static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error
static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect
#ifdef __cplusplus
}
#endif
#endif // __INC_EXTERNEL_RT_ERROR_CODES_H__
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__
#define __INC_EXTERNEL_RT_ERROR_CODES_H__

#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

static const int32_t ACL_RT_SUCCESS = 0; // success

static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000; // param invalid
static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001; // invalid device id
static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002; // current context null
static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003; // stream not in current context
static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004; // model not in current context
static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005; // stream not in model
static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006; // event timestamp invalid
static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007; // event timestamp reversal
static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008; // memory address unaligned
static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009; // open file failed
static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010; // write file failed
static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011; // error subscribe stream
static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012; // error subscribe thread
static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013; // group not set
static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014; // group not create
static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015; // callback not register to stream
static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016; // invalid memory type
static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017; // invalid handle
static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018; // invalid malloc type
static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019; // wait timeout

static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000; // feature not support
static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error
static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002; // memory free error
static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003; // aicore over flow
static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004; // no device
static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005; // resource alloc fail
static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006; // no permission
static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no event resource
static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource
static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource
static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource
static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource

static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error
static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internel error
static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002; // task full in stream
static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003; // task empty in stream
static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004; // stream not complete
static const int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005; // end of sequence
static const int32_t ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006; // event not complete
static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007; // context release error
static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008; // soc version error
static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009; // task type not support
static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010; // ts lost heartbeat
static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011; // model execute failed
static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012; // report timeout
static const int32_t ACL_ERROR_RT_SYS_DMA = 507013; // sys dma error
static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014; // aicore timeout
static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015; // aicore exception
static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016; // aicore trap exception
static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017; // aicpu timeout
static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018; // aicpu exception
static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019; // aicpu datadump response error
static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020; // aicpu model operate response error
static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021; // profiling error
static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022; // ipc error
static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023; // model abort normal
static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024; // kernel unregistering
static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025; // ringbuffer not init
static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026; // ringbuffer no data
static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027; // kernel lookup error
static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028; // kernel register duplicate
static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029; // debug register failed
static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug unregister failed
static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context
static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out
static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error
static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout
static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception
static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception
static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal

static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error
static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error
static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect

#ifdef __cplusplus
}
#endif

#endif // __INC_EXTERNEL_RT_ERROR_CODES_H__
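
These constants are plain int32_t return codes grouped by range: 107xxx for parameter or handle misuse, 207xxx for unsupported features and exhausted resources, 507xxx for internal runtime and device faults. A minimal sketch of how a caller might branch on them; rtFunctionReturningCode is a placeholder, not a real runtime API, and the include path may need adjusting to your install layout.

#include <cstdint>
#include <cstdio>
#include "runtime/rt_error_codes.h"  // adjust to where fwkacllib headers are installed

int32_t rtFunctionReturningCode();  // placeholder for any runtime call returning these codes

void CheckRuntimeCode() {
  const int32_t ret = rtFunctionReturningCode();
  if (ret == ACL_RT_SUCCESS) {
    return;
  }
  if (ret == ACL_ERROR_RT_FEATURE_NOT_SUPPORT) {
    (void)printf("feature not supported on this device\n");
  } else if (ret >= 507000) {  // internal/runtime fault range per the grouping above
    (void)printf("runtime internal fault: %d\n", ret);
  } else {
    (void)printf("runtime call failed: %d\n", ret);
  }
}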

third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_typedef_win.h (+83 -83)

@@ -1,83 +1,83 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MMPA_TYPEDEF_WIN_H
#define MMPA_TYPEDEF_WIN_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif // __cpluscplus
#endif // __cpluscplus
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#define EN_OK 0
#define EN_ERR 1
#define EN_ERROR (-1)
#define EN_INVALID_PARAM (-2)
#define EN_TIMEOUT (-3)
#define HANDLE_INVALID_VALUE (-1)
#define INVALID_SOCKET_HANDLE INVALID_SOCKET
#define MMPA_MEM_MAX_LEN (0x7fffffff)
#define MMPA_PROCESS_ERROR (0x7fffffff)
#define MMPA_ONE_THOUSAND 1000
#define MMPA_COMPUTER_BEGIN_YEAR 1900
#define SUMMER_TIME_OR_NOT (-1)
#define MMPA_ZERO 0
#define MMPA_VALUE_ONE 1
#define MMPA_SOCKET_MAIN_EDITION 2
#define MMPA_SOCKET_SECOND_EDITION 0
#define MMPA_PIPE_BUF_SIZE 1024
#define MMPA_MAX_SCANDIR_COUNT 1024
#define MAX_IOVEC_SIZE 32
#define MMPA_PIPE_COUNT 2
#define MMPA_THREADNAME_SIZE 16
#define MMPA_MIN_OS_NAME_SIZE (MAX_COMPUTERNAME_LENGTH + 1)
#define MMPA_MIN_OS_VERSION_SIZE 64
#define MMPA_MAX_NI 19
#define MMPA_MIDDLE_NI 5
#define MMPA_LOW_NI (-5)
#define MMPA_MIN_NI (-20)
#define MMPA_MAX_FILE 128
#define MMPA_MAX_THREAD_PIO 99
#define MMPA_MIDDLE_THREAD_PIO 66
#define MMPA_LOW_THREAD_PIO 33
#define MMPA_MIN_THREAD_PIO 1
#define MMPA_THREAD_SCHED_RR 0
#define MMPA_THREAD_SCHED_FIFO 0
#define MMPA_THREAD_SCHED_OTHER 0
#define MMPA_THREAD_MIN_STACK_SIZE 0
#define MM_MUTEX_INITIALIZER NULL
#ifdef __cplusplus
#if __cplusplus
}
#endif // __cpluscplus
#endif // __cpluscplus
#endif // _MMPA_TYPEDEF_WIN_H_
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MMPA_TYPEDEF_WIN_H
#define MMPA_TYPEDEF_WIN_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif // __cpluscplus
#endif // __cpluscplus
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#define EN_OK 0
#define EN_ERR 1
#define EN_ERROR (-1)
#define EN_INVALID_PARAM (-2)
#define EN_TIMEOUT (-3)
#define HANDLE_INVALID_VALUE (-1)
#define INVALID_SOCKET_HANDLE INVALID_SOCKET
#define MMPA_MEM_MAX_LEN (0x7fffffff)
#define MMPA_PROCESS_ERROR (0x7fffffff)
#define MMPA_ONE_THOUSAND 1000
#define MMPA_COMPUTER_BEGIN_YEAR 1900
#define SUMMER_TIME_OR_NOT (-1)
#define MMPA_ZERO 0
#define MMPA_VALUE_ONE 1
#define MMPA_SOCKET_MAIN_EDITION 2
#define MMPA_SOCKET_SECOND_EDITION 0
#define MMPA_PIPE_BUF_SIZE 1024
#define MMPA_MAX_SCANDIR_COUNT 1024
#define MAX_IOVEC_SIZE 32
#define MMPA_PIPE_COUNT 2
#define MMPA_THREADNAME_SIZE 16
#define MMPA_MIN_OS_NAME_SIZE (MAX_COMPUTERNAME_LENGTH + 1)
#define MMPA_MIN_OS_VERSION_SIZE 64
#define MMPA_MAX_NI 19
#define MMPA_MIDDLE_NI 5
#define MMPA_LOW_NI (-5)
#define MMPA_MIN_NI (-20)
#define MMPA_MAX_FILE 128
#define MMPA_MAX_THREAD_PIO 99
#define MMPA_MIDDLE_THREAD_PIO 66
#define MMPA_LOW_THREAD_PIO 33
#define MMPA_MIN_THREAD_PIO 1
#define MMPA_THREAD_SCHED_RR 0
#define MMPA_THREAD_SCHED_FIFO 0
#define MMPA_THREAD_SCHED_OTHER 0
#define MMPA_THREAD_MIN_STACK_SIZE 0
#define MM_MUTEX_INITIALIZER NULL
#ifdef __cplusplus
#if __cplusplus
}
#endif // __cpluscplus
#endif // __cpluscplus
#endif // _MMPA_TYPEDEF_WIN_H_

third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h (+1 -1)

@@ -1,4 +1,4 @@
/**
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");


third_party/fwkacllib/inc/ops/aipp.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/all_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/array_ops.h (+1 -33)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1249,38 +1249,6 @@ REG_OP(ExpandD)
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
.REQUIRED_ATTR(shape, ListInt)
.OP_END_FACTORY_REG(ExpandD)

/**
*@brief Finds unique elements in a 1D tensor. \n

*@par Inputs:
*x: 1D tensor. Must be one of the following types:
* float16, float32, double, int64, int32, int16, uint16, int8 ,uint8. \n

*@par Attributes:
*@li return_inverse: Whether to also return the indices for where elements in the original
* input ended up in the returned unique list.
*@li return_inverse: Whether to also return the counts for each unique element.

*@par Outputs:
*@li y1: The output list of unique scalar elements. Has the same type as "x".
*@li y2: Representing the indices for where elements in the original input map to in the output.
*@li y3: Representing the number of occurrences for each unique value or tensor. \n

* @par Third-party framework compatibility
* Compatible with the troch operator _unique2.
*/

REG_OP(UniqueWithCountsAndSorting)
.INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
.OUTPUT(y1, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
.OUTPUT(y2, TensorType({ DT_INT32, DT_INT64 }))
.OUTPUT(y3, TensorType({ DT_INT32, DT_INT64 }))
.ATTR(return_inverse, Bool, false)
.ATTR(return_counts, Bool, false)
.OP_END_FACTORY_REG(UniqueWithCountsAndSorting)
} // namespace ge

#endif // OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_
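
For context on what the removed UniqueWithCountsAndSorting proto described: the sorted unique values of a 1D tensor plus, optionally, the inverse mapping and per-value counts. A host-side C++ reference of those three outputs, for illustration only; it is not how the AI Core kernel computes them.

#include <algorithm>
#include <cstdint>
#include <vector>

struct UniqueResult {
  std::vector<float> values;     // y1: sorted unique elements
  std::vector<int32_t> inverse;  // y2: index into values for each input element
  std::vector<int32_t> counts;   // y3: occurrences of each unique value
};

UniqueResult UniqueWithCountsAndSortingRef(const std::vector<float> &x) {
  UniqueResult r;
  std::vector<float> sorted = x;
  std::sort(sorted.begin(), sorted.end());
  sorted.erase(std::unique(sorted.begin(), sorted.end()), sorted.end());
  r.values = sorted;
  r.counts.assign(sorted.size(), 0);
  r.inverse.reserve(x.size());
  for (const float v : x) {
    const auto it = std::lower_bound(sorted.begin(), sorted.end(), v);
    const auto idx = static_cast<int32_t>(it - sorted.begin());
    r.inverse.push_back(idx);  // position of v in the sorted unique list
    ++r.counts[idx];
  }
  return r;
}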

third_party/fwkacllib/inc/ops/audio_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/batch_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/bitwise_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/boosted_trees_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/candidate_sampling_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/case_condition_ops.h (+53 -0)

@@ -0,0 +1,53 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file case_condition_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_CASE_CONDITION_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_CASE_CONDITION_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
*@brief x[0] is i, x[1] is j and x[2] is k when algorithm is LU,
y = 0 when i >= k && j < k,
y = 1 when i == k && j == k,
y = 2 when i > k && j == k,
y = 3 when i == k && j > k,
y = 4 when i > k && j > k,
default y = 5
use for lu decomposition
*@par Inputs:
*x: A Tensor of type int32/int64/uint64. \n

*@par Attributes:
*algorithm: A string, only support LU now
*@par Outputs:
*y: A Tensor of type int32
*/
REG_OP(CaseCondition)
.INPUT(x, TensorType({DT_INT32, DT_INT64, DT_UINT64}))
.OUTPUT(y, TensorType({DT_INT32}))
.ATTR(algorithm, String, "LU")
.OP_END_FACTORY_REG(CaseCondition)

} // namespace ge


#endif // OPS_BUILT_IN_OP_PROTO_INC_CASE_CONDITION_OPS_H_
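
The CaseCondition comment above fully specifies the mapping for algorithm = "LU", so a scalar reference is easy to write down. The sketch below simply mirrors that table; it only illustrates the documented semantics and is not the operator implementation.

#include <cstdint>

// Reference for CaseCondition with algorithm = "LU":
// x[0] = i, x[1] = j, x[2] = k; returns the case index y described in the header.
int32_t CaseConditionLuRef(int64_t i, int64_t j, int64_t k) {
  if ((i >= k) && (j < k)) return 0;
  if ((i == k) && (j == k)) return 1;
  if ((i > k) && (j == k)) return 2;
  if ((i == k) && (j > k)) return 3;
  if ((i > k) && (j > k)) return 4;
  return 5;  // default case
}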

third_party/fwkacllib/inc/ops/condtake_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/control_flow_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/coordinates_1d_to_2d_ops.h (+48 -0)

@@ -0,0 +1,48 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file coordinates_1d_to_2d_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_COORDINATES_1D_TO_2D_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_COORDINATES_1D_TO_2D_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
*@brief Convert one-dimensional coordinates to two-dimensional coordinates.
*@par Inputs:
*@li x: A Tensor of type int32/int64/uint64. One-dimensional coordinates.
*@li shape: A Tensor of type int32/int64/uint64. 4D tensor [N,C,H,W].
*@par Outputs:
*@li row: row of two-dimensional
*@li col: col of two-dimensional
*@li n: col number of two-dimensional
*/
REG_OP(Coordinates1DTo2D)
.INPUT(x, TensorType({DT_INT32, DT_INT64, DT_UINT64}))
.INPUT(shape, TensorType({DT_INT32, DT_INT64, DT_UINT64}))
.OUTPUT(row, TensorType({DT_INT32, DT_INT64, DT_UINT64}))
.OUTPUT(col, TensorType({DT_INT32, DT_INT64, DT_UINT64}))
.OUTPUT(n, TensorType({DT_INT32, DT_INT64, DT_UINT64}))
.OP_END_FACTORY_REG(Coordinates1DTo2D)

} // namespace ge


#endif // OPS_BUILT_IN_OP_PROTO_INC_COORDINATES_1D_TO_2D_OPS_H_
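
As a rough scalar reference for Coordinates1DTo2D: a flat index is split into a row and a column by dividing by the row width, and that width is returned as n. Which of the [N,C,H,W] dimensions supplies the width is not spelled out in the comment, so the sketch below assumes the W axis; verify against the kernel you use.

#include <cstdint>

struct Coord2D {
  int64_t row;
  int64_t col;
  int64_t n;  // number of columns used for the split
};

// Assumption: the last ("W") element of shape is the row width.
Coord2D Coordinates1DTo2DRef(int64_t flat_index, const int64_t shape_nchw[4]) {
  const int64_t width = shape_nchw[3];
  return Coord2D{flat_index / width, flat_index % width, width};
}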

third_party/fwkacllib/inc/ops/correlation.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/ctc_ops.h (+2 -2)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -164,7 +164,7 @@ REG_OP(CTCBeamSearchDecoder)
* Compatible with Pytorch CTCLoss operator.

*@par Restrictions:
*The length of Label should in [4, 1000].
*The limit of Label’s length is 1K.
*/
REG_OP(CTCLossV2)
.INPUT(log_probs, TensorType({DT_FLOAT, DT_DOUBLE}))


third_party/fwkacllib/inc/ops/data_flow_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/elewise_calculation_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/functional_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/get_data_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/globalavgpool.h (+48 -48)

@@ -1,49 +1,49 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*!
* \file globalavgpool.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_
#define OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_
#include "graph/operator_reg.h"
namespace ge {
/**
*@brief GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel.
This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor \n
*@par Inputs:
*@li x: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W),
where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.
For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
*@par Outputs:
*y: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input.
The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1
*@par Restrictions:
*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly.
*/
REG_OP(GlobalAveragePool)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
.OP_END_FACTORY_REG(GlobalAveragePool)
} // namespace ge
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*!
* \file globalavgpool.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_
#define OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_
#include "graph/operator_reg.h"
namespace ge {
/**
*@brief GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel.
This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor \n
*@par Inputs:
*@li x: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W),
where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.
For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
*@par Outputs:
*y: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input.
The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1
*@par Restrictions:
*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly.
*/
REG_OP(GlobalAveragePool)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
.OP_END_FACTORY_REG(GlobalAveragePool)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVGPOOL_H_
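
The GlobalAveragePool description above ("AveragePool with kernel size equal to the spatial dimension") translates directly into a reference loop over NCHW data. A host-side sketch of that reduction, not the device kernel:

#include <cstdint>
#include <vector>

// Output has shape (N, C, 1, 1): one mean per (n, c) plane of an NCHW input.
std::vector<float> GlobalAveragePoolRef(const std::vector<float> &x,
                                        int64_t n, int64_t c, int64_t h, int64_t w) {
  std::vector<float> y(static_cast<size_t>(n * c), 0.0f);
  const int64_t plane = h * w;
  for (int64_t ni = 0; ni < n; ++ni) {
    for (int64_t ci = 0; ci < c; ++ci) {
      double sum = 0.0;
      const int64_t base = (ni * c + ci) * plane;
      for (int64_t k = 0; k < plane; ++k) {
        sum += x[static_cast<size_t>(base + k)];
      }
      y[static_cast<size_t>(ni * c + ci)] = static_cast<float>(sum / static_cast<double>(plane));
    }
  }
  return y;
}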

third_party/fwkacllib/inc/ops/hcom_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/hvd_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/image_ops.h (+1 -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


third_party/fwkacllib/inc/ops/index_to_addr_ops.h (+63 -0)

@@ -0,0 +1,63 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file index_to_addr_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_INDEX_TO_ADDR_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_INDEX_TO_ADDR_OPS_H_

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Gets the block tensor according to the base address tensor, for HCCL remote read to use.
*@par Inputs:
*@li base_addr: A Tensor of type int64/uint64. \n
*@li row: A Tensor of type int64/uint64. \n
*@li col: A Tensor of type int64/uint64.

*@par Outputs:
*addr_table: A list of [rank id, host addr, device addr, read size].

*@par Attributes:
*@li ori_shape: A required list of ints. Shape of the base tensor.
*@li block_size: A required list of ints. Shape of the split block tensor.
*@li ori_storage_mode: An optional string from: '"Matrix", "UT"'. Defaults to
"Matrix". Currently only Matrix storage is supported.
*@li block_storage_mode: An optional string from: '"Matrix", "UT"'. Defaults to
"Matrix". Currently only Matrix storage is supported.
*@li rank_id: An optional int, the rank id. Defaults to 0.
*@li dtype: An optional type of the base tensor. Defaults to DT_FLOAT.
*/
REG_OP(IndexToAddr)
.INPUT(base_addr, TensorType({DT_INT64, DT_UINT64}))
.INPUT(x, TensorType({DT_INT64, DT_UINT64}))
.OUTPUT(addrs_table, TensorType({DT_INT64, DT_UINT64}))
.REQUIRED_ATTR(ori_shape, ListInt)
.REQUIRED_ATTR(block_size, ListInt)
.ATTR(ori_storage_mode, String, "Matrix")
.ATTR(block_storage_mode, String, "Matrix")
.ATTR(rank_id, Int, 0)
.ATTR(dtype, Type, DT_FLOAT)
.OP_END_FACTORY_REG(IndexToAddr)

} // namespace ge


#endif // OPS_BUILT_IN_OP_PROTO_INC_INDEX_TO_ADDR_OPS_H_
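
The attribute list above implies simple tile arithmetic: a row-major matrix of shape ori_shape is split into block_size tiles, and a tile index is turned into a byte offset from base_addr. A hedged sketch of that arithmetic, where the helper name, the dense "Matrix" layout, and the element size are all assumptions made for illustration:

// index_to_addr_sketch.cc -- illustrative address arithmetic only.
#include <cstddef>
#include <cstdint>

// Byte offset of the first element of tile (block_row, block_col) inside a
// row-major matrix of ori_rows x ori_cols elements that is split into tiles
// of block_rows x block_cols. Dense "Matrix" storage and elem_size bytes per
// element are assumed.
uint64_t BlockBaseOffset(uint64_t base_addr, std::size_t ori_cols,
                         std::size_t block_rows, std::size_t block_cols,
                         std::size_t block_row, std::size_t block_col,
                         std::size_t elem_size) {
  const std::size_t first_row = block_row * block_rows;
  const std::size_t first_col = block_col * block_cols;
  return base_addr + (first_row * ori_cols + first_col) * elem_size;
}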

+ 1
- 1
third_party/fwkacllib/inc/ops/internal_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/linalg_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/list_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 33
- 1
third_party/fwkacllib/inc/ops/logging_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -111,6 +111,38 @@ REG_OP(PrintV2)
.INPUT(x, TensorType({DT_STRING}))
.ATTR(output_stream, String, "stderr")
.OP_END_FACTORY_REG(PrintV2)

/**
*@brief Prints a list of tensors. \n

*@par Inputs:
*x: A tensor passed through this op . \n
*data: A list of tensors to print out when op is evaluated. \n

*@par Attributes:
*message: A string, prefix of the error message. \n
*first_n: Only log first_n number of times. Negative numbers
*log always; this is the default. \n
*summarize: Only print this many entries of each tensor.
*If None, then a maximum of 3 elements are printed per input tensor. \n

*@par Third-party framework compatibility
*Compatible with tensorflow Print operator . \n

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(PrintV3)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_DOUBLE, DT_STRING}))
.DYNAMIC_INPUT(data, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_DOUBLE, DT_STRING}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_DOUBLE, DT_STRING}))
.ATTR(message, String, "")
.ATTR(first_n, Int, -1)
.ATTR(summarize, Int, 3)
.OP_END_FACTORY_REG(PrintV3)
} // namespace ge

#endif // OPS_BUILT_IN_OP_PROTO_INC_LOGGING_OPS_H_
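
A host-side sketch of the documented "first_n"/"summarize" behaviour may help; this is not the device kernel, and the function name and float data type are illustrative only:

// printv3_semantics_sketch.cc -- host-side illustration of the documented
// "first_n"/"summarize" behaviour; not the device kernel.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

void PrintSummarized(const std::string &message,
                     const std::vector<std::vector<float>> &data,
                     int first_n, int summarize) {
  static int call_count = 0;  // a negative first_n means: always log
  ++call_count;
  if (first_n >= 0 && call_count > first_n) return;
  std::printf("%s", message.c_str());
  for (const auto &t : data) {
    const std::size_t limit =
        (summarize < 0) ? t.size()
                        : std::min(t.size(), static_cast<std::size_t>(summarize));
    std::printf(" [");
    for (std::size_t i = 0; i < limit; ++i) std::printf(" %g", t[i]);
    if (limit < t.size()) std::printf(" ...");  // truncated per "summarize"
    std::printf(" ]");
  }
  std::printf("\n");
}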

+ 1
- 1
third_party/fwkacllib/inc/ops/lookup_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/math_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 25
- 78
third_party/fwkacllib/inc/ops/matrix_calculation_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -88,11 +88,11 @@ REG_OP(MatMul)
* Compatible with the TensorFlow operator BatchMatmul.
*/
REG_OP(MatMulV2)
.INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8}))
.INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8}))
.INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4}))
.INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4}))
.OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
.ATTR(transpose_x1, Bool, false)
.ATTR(transpose_x2, Bool, false)
.ATTR(offset_x, Int, 0)
@@ -180,12 +180,12 @@ REG_OP(MatMulV2Compress)
*/

REG_OP(GEMM)
.INPUT(a, TensorType({DT_FLOAT16, DT_INT8}))
.INPUT(b, TensorType({DT_FLOAT16, DT_INT8}))
.INPUT(c, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.INPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.INPUT(a, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
.INPUT(b, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
.INPUT(c, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
.INPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
.INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
.ATTR(transpose_a, Bool, false)
.ATTR(transpose_b, Bool, false)
.OP_END_FACTORY_REG(GEMM)
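
Assuming the standard BLAS-style definition y = alpha * a * b + beta * c (with both transpose flags false), a compact float reference of what the GEMM registration above describes; the function name and row-major layout are assumptions:

// gemm_reference_sketch.cc -- float reference for y = alpha * a * b + beta * c
// with transpose_a = transpose_b = false; illustrative only.
#include <cstddef>
#include <vector>

void GemmRef(const std::vector<float> &a, const std::vector<float> &b,
             const std::vector<float> &c, float alpha, float beta,
             std::size_t m, std::size_t k, std::size_t n,
             std::vector<float> &y) {
  y.assign(m * n, 0.0f);
  for (std::size_t i = 0; i < m; ++i) {
    for (std::size_t j = 0; j < n; ++j) {
      float acc = 0.0f;
      for (std::size_t p = 0; p < k; ++p) acc += a[i * k + p] * b[p * n + j];
      y[i * n + j] = alpha * acc + beta * c[i * n + j];  // BLAS-style update
    }
  }
}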
@@ -246,10 +246,10 @@ REG_OP(BatchMatMul)
*/

REG_OP(BatchMatMulV2)
.INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8}))
.INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8}))
.INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4}))
.INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4}))
.OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
.ATTR(adj_x1, Bool, false)
.ATTR(adj_x2, Bool, false)
@@ -531,61 +531,6 @@ REG_OP(ScatterAdd)
.ATTR(use_locking, Bool, false)
.OP_END_FACTORY_REG(ScatterAdd)

/**
*@brief Use a scalar to modify the tensor. \n

*@par Inputs:
*inputs, including:
*@li index: An ND Tensor . \n

*Must be one of the following types: float16, float32, int32, int8, uint8

*@par Attributes:
* dim : the axis along which to index .
* value : the source element(s) to scatter . \n

*@par Outputs:
*y: A Tensor. Has the same type and format as input "index" . \n

*@par Third-party framework compatibility
* Compatible with the Pytorch operator ScatterScalar.
*/
REG_OP(ScatterScalar)
.INPUT(index, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
.OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
.REQUIRED_ATTR(dim, Int)
.REQUIRED_ATTR(value, Float)
.OP_END_FACTORY_REG(ScatterScalar)

/**
*@brief Use a tensor to modify the tensor . \n

*@par Inputs:
* Two inputs, including:
*@li index: An ND Tensor . \n

*Must be one of the following types: float16, float32, int32, int8, uint8

*@li src: An ND Tensor . \n

*Must be one of the following types: float16, float32, int32, int8, uint8

*@par Attributes:
* dim : the axis along which to index . \n

*@par Outputs:
*y: A Tensor. Has the same type and format as input "index" . \n

*@par Third-party framework compatibility
* Compatible with the Pytorch operator ScatterTensor.
*/
REG_OP(ScatterTensor)
.INPUT(index, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
.INPUT(src, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
.OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
.REQUIRED_ATTR(dim, Int)
.OP_END_FACTORY_REG(ScatterTensor)

/**
*@brief Divides a variable reference by sparse updates . \n

@@ -839,10 +784,10 @@ REG_OP(DiagPart)
* Yes
*/
REG_OP(FullyConnection)
.INPUT(x, TensorType({DT_FLOAT16, DT_INT8}))
.INPUT(w, TensorType({DT_FLOAT16, DT_INT8}))
.INPUT(x, TensorType({DT_FLOAT16, DT_INT8, DT_INT4}))
.INPUT(w, TensorType({DT_FLOAT16, DT_INT8, DT_INT4}))
.OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_INT32,DT_FLOAT32}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32,DT_FLOAT32}))
.REQUIRED_ATTR(num_output, Int)
.ATTR(transpose, Bool, false)
@@ -1197,10 +1142,12 @@ REG_OP(IndexAdd)

* @par Inputs:
* Three inputs, including:
* @li x1: A Tensor. Must be one of the following types:
* float16, float32, int32, int8, uint8.
* @li x1: A Tensor. Must be one of the following types:
*float16, float32, double, int32, uint8, int16, int8, complex64, int64,
*qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n

* @li x2: A Tensor of the same type as "x1".
* @li indices: A Tensor of the indices, type should be int32.
* @li indices: A Tensor of the indices.

* @par Attributes:
* @li accumulate: Does it support self accumulation.Defaults to 0.
@@ -1215,10 +1162,10 @@ REG_OP(IndexAdd)
* Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(IndexPut)
.INPUT(x1, TensorType({DT_INT64, DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
.INPUT(x2, TensorType({DT_INT64, DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
.INPUT(indices, TensorType({DT_INT64, DT_INT32}))
.OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
.INPUT(x1, TensorType::BasicType())
.INPUT(x2, TensorType::BasicType())
.OUTPUT(y, TensorType::BasicType())
.REQUIRED_ATTR(indices, ListInt)
.ATTR(accumulate, Int, 0)
.OP_END_FACTORY_REG(IndexPut)
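
The documented behaviour (write x2 into x1 at the positions given by "indices", adding instead of overwriting when "accumulate" is set) can be sketched for the 1-D case; the names and the float element type are illustrative:

// index_put_sketch.cc -- 1-D illustration of the documented semantics:
// x1[indices[i]] = x2[i] when accumulate == 0, otherwise x1[indices[i]] += x2[i].
#include <cstddef>
#include <cstdint>
#include <vector>

void IndexPut1D(std::vector<float> &x1, const std::vector<float> &x2,
                const std::vector<int64_t> &indices, int accumulate) {
  for (std::size_t i = 0; i < indices.size(); ++i) {
    const std::size_t pos = static_cast<std::size_t>(indices[i]);
    if (accumulate != 0) {
      x1[pos] += x2[i];  // self accumulation
    } else {
      x1[pos] = x2[i];   // plain overwrite
    }
  }
}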



+ 1
- 1
third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 47
- 46
third_party/fwkacllib/inc/ops/nn_calculation_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -869,59 +869,60 @@ REG_OP(Conv2D)
/**
*@brief Computes a 2D convolution given 4D "x" and "filter_compress" tensors.
*@par Inputs:
* @li x: A 4D tensor of input images.
* @li filter_compress: A 4D tensor of compressed filters.
* @li compress_index: A 1D Tensor dtype of int8.
* @li bias: An optional 1D tensor.
* @li offset_w: An optional 1D tensor for quantized convolution. Reserved.
*@li x: A 4D tensor of input images.
*@li filter_compress: A 4D tensor of compressed filter data blocks.
*@li compress_index: A 1D tensor of index for decompression.
*@li bias: An optional 1D tensor of additive biases to the filter outputs.
* The data is stored in the order of: [out_channels].
*@li offset_w: Reserved.
*\n
*\n
* The following are the supported data types and data formats:
*\n
*\n
| Tensor | x | filter_compress | compress_index | bias | y |\n
| :-------: | :-----: | :--------------: | :------------: | :-----: | :-----: |\n
| Data Type | int8 | int8 | int8 | int32 | int32 |\n
| Format | NCHW | NCHW | ND | ND | NCHW |\n
| | NHWC | HWCN | | | NHWC |\n
*\n
* For float32 type, the actual calculation on the chip is based on
* float16.
*\n
*
* The input and output tensor attributes are listed as follows:
* @verbatim
|Tensor | x | filter_compress | bias | offset_w | y
-----------|---------|---------|---------|----------|--------
|Data Type | float16 | float16 | float16 | _ | float16
| |---------|---------|---------|----------|--------
| | float32 | float32 | float32 | _ | float32
| |---------|---------|---------|----------|--------
| | int8 | int8 | int32 | int8 | int32
-----------|---------|---------|---------|----------|--------
|Format | NCHW | NCHW | ND | ND | NCHW
| | NHWC | NHWC | | | NHWC
| | | HWCN | | |
@endverbatim
* It should be noted that the data types must correspond to each other, but the
* format does not need to . \n

*@par Attributes:
* @li strides: A list of 4 integers. Specifying the strides of the
* convolution along the height and width. The dimension order is determined
* by the data format of "x". By default the N and C dimensions are set to 1.
* @li pads: A list of 4 integers. Specifying the top, bottom, left and right
* padding.
* @li dilations: A list of 4 integers. Specifying the dilation rate to use
* for dilated convolution. Has the same dimension order and value as "strides".
* @li groups: Number of blocked connections from input channels to output
* channels. Input channels and output channels must both be divisible by
* "groups".Type is int32.
* @li offset_x: An optional integer for quantized convolution. Type is int32.
* Defaults to "0".
* @li data_format: An optional string from: "NHWC", "NCHW". Specifying the
* data format of the input and output images. Type is string.
* Defaults to "NHWC". Reserved . \n
*@li strides: Required. A list of 4 integers. The stride of the sliding window
* for each dimension of input. The dimension order is determined by the data
* format of "x". The N and C dimensions must be set to 1.
*@li pads: Required. A list of 4 integers. The number of pixels to add to each
* (top, bottom, left, right) side of the input.
*@li dilations: Optional. A list of 4 integers. The dilation factor for each
* dimension of input. The dimension order is determined by the data format of
* "x". The N and C dimensions must be set to 1. Defaults to [1, 1, 1, 1].
*@li groups: Optional. An integer of type int32. The number of blocked
* connections from input channels to output channels. In_channels and
* out_channels must both be divisible by "groups". Only support 1.
*@li offset_x: Optional. An integer of type int32. The negative offset added
* to the input image for int8 type. Ensure that the output is within the
* effective range. Defaults to 0.
*@li data_format: Reserved.
*
*@par Outputs:
* @li y: A 4D Tensor of output images . \n

* y: A 4D Tensor of output feature map. Has the same type as "x". With the
* format "NHWC", the data is stored in the order of: [batch, out_height,
* out_width, out_channels].
*\n
*
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED.
*Warning: THIS FUNCTION IS EXPERIMENTAL.
*/
REG_OP(Conv2DCompress)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
.INPUT(filter_compress, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
.INPUT(x, TensorType({DT_INT8}))
.INPUT(filter_compress, TensorType({DT_INT8}))
.INPUT(compress_index, TensorType({DT_INT8}))
.OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
.OPTIONAL_INPUT(bias, TensorType({DT_INT32}))
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
.OUTPUT(y, TensorType({DT_INT32}))
.REQUIRED_ATTR(strides, ListInt)
.REQUIRED_ATTR(pads, ListInt)
.ATTR(dilations, ListInt, {1, 1, 1, 1})
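
The strides/pads/dilations attributes above feed the usual convolution output-size arithmetic; a small sketch of that formula (standard definition assumed, not taken from the header):

// conv_out_shape_sketch.cc -- the standard output-size arithmetic implied by
// the strides/pads/dilations attributes; not taken from the header.
#include <cstdint>

int64_t ConvOutDim(int64_t in, int64_t kernel, int64_t pad_before,
                   int64_t pad_after, int64_t stride, int64_t dilation) {
  const int64_t effective_kernel = dilation * (kernel - 1) + 1;
  return (in + pad_before + pad_after - effective_kernel) / stride + 1;
}
// Example: ConvOutDim(224, 3, 1, 1, 2, 1) == 112 for a 3x3 filter, pads of 1,
// stride 2 and dilation 1 on a 224-pixel dimension.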


+ 6
- 3
third_party/fwkacllib/inc/ops/nn_detect_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1002,9 +1002,12 @@ REG_OP(SPP)
*@par Inputs:
* Three inputs, including:
*@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature
* map.
* map. The data of x must be greater than or equal to "0.0".
*@li rois: A tensor of type float16 or float32, with 3D shape
* [batch, 5, roi_max_num], describing the ROIs.
* [batch, 5, roi_max_num], describing the ROIs. Each ROI consists of five
* elements: "batch_id", "x1", "y1", "x2", and "y2", where "batch_id" indicates
* the index of the input feature map, and "x1", "y1", "x2", and "y2" must be
* greater than or equal to "0.0".
* roi_max_num must be less than or equal to 6000 and must be divisible by 16.
*@li roi_actual_num: An optional tensor of type int32, with shape [batch, 8], specifying
* the number of ROIs per batch . \n


+ 1
- 1
third_party/fwkacllib/inc/ops/nn_norm_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/nn_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 27
- 1
third_party/fwkacllib/inc/ops/nn_pooling_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1402,6 +1402,7 @@ REG_OP(MaxPoolGradWithArgmaxV2)
* @li pads: A required list of int8, int16, int32, or int64 values,
* the data to calculate with when padding_mode is "CALCULATED".
* @li data_format: An optional string. Defaults to "NHWC" .
* If data_format = "NC1HWC0", ori_format must be "NCHW".
* @li global_pooling: A bool. Whether to use global pooling.
* If global_pooling = true, kernel size and paddings will be ignored.
* Defaults to false.
@@ -1786,5 +1787,30 @@ REG_OP(SubSampleLabels)
.REQUIRED_ATTR(positive_fraction, Float)
.OP_END_FACTORY_REG(SubSampleLabels)

/**
*@brief Computes GlobalLpPool; consumes an input tensor X and applies Lp pooling across the
values in the same channel. \n

*@par Inputs:
* x: A Tensor of type float16 or float32 . \n

*@par Attributes:
*@li p: Optional. Must be one of the following types: float32. Defaults to 2.0. \n

*@par Outputs:
* y: A Tensor. Has the same type as "x". When the shape of x is [N,C,H,W], the shape of y is [N,C,1,1].
*@par Third-party framework compatibility
* Compatible with the onnx operator GlobalLpPool.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED.
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/

REG_OP(GlobalLpPool)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.ATTR(p, Float, 2.0)
.OP_END_FACTORY_REG(GlobalLpPool)

} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H
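
Under the ONNX GlobalLpPool semantics the comment cites, each (N, C) channel is reduced to (sum |x|^p)^(1/p); a host-side float sketch, with the function name and NCHW layout assumed for illustration:

// global_lp_pool_sketch.cc -- per-channel Lp pooling under the ONNX semantics
// the comment above cites; NCHW float layout assumed, illustrative only.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> GlobalLpPoolNCHW(const std::vector<float> &x,
                                    std::size_t n, std::size_t c,
                                    std::size_t h, std::size_t w, float p) {
  std::vector<float> y(n * c, 0.0f);
  const std::size_t plane = h * w;
  for (std::size_t idx = 0; idx < n * c; ++idx) {
    double acc = 0.0;
    const float *src = x.data() + idx * plane;
    for (std::size_t i = 0; i < plane; ++i) acc += std::pow(std::fabs(src[i]), p);
    y[idx] = static_cast<float>(std::pow(acc, 1.0 / p));  // one value per (N, C)
  }
  return y;
}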

+ 1
- 1
third_party/fwkacllib/inc/ops/nn_training_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/no_op.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 17
- 1
third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -98,6 +98,22 @@ REG_OP(FastGeluGrad)
.OUTPUT(z, TensorType({DT_FLOAT16, DT_FLOAT}))
.OP_END_FACTORY_REG(FastGeluGrad)

/**
* @brief Compute hardswish of "x" element-wise . \n

*@par Inputs:
*One input, including:
*x: A Tensor. Must be one of the following types: float16, float32

*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the Torch operator Hardswish.
*/
REG_OP(Hardswish)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.OP_END_FACTORY_REG(Hardswish)
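
The Torch Hardswish the comment references is defined element-wise as x * ReLU6(x + 3) / 6; a one-function sketch of that definition (illustrative, not the device kernel):

// hardswish_sketch.cc -- element-wise definition used by the Torch operator
// the registration above is documented as compatible with; illustrative only.
#include <algorithm>

float Hardswish(float x) {
  const float relu6 = std::min(std::max(x + 3.0f, 0.0f), 6.0f);
  return x * relu6 / 6.0f;  // 0 for x <= -3, x for x >= 3, smooth in between
}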

/**
*@brief Computes the gradient for the tanh of "x" . \n


+ 1
- 1
third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/outfeed_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/pad_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/parsing_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 4
- 3
third_party/fwkacllib/inc/ops/quantize_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -96,18 +96,19 @@ REG_OP(Quantize)
*@li dst_type: An optional int32, specifying the output data type. Defaults to "DT_INT8" . \n

*@par Outputs:
*y: The quantized output tensor of type int8 and with format NC1HWC0 . \n
*y: The quantized output tensor of type int8 or int4 and with format NC1HWC0 . \n

*@par Third-party framework compatibility
* It is a custom operator. It has no corresponding operator in Caffe.
*/
REG_OP(AscendQuant)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32}))
.OUTPUT(y, TensorType({DT_INT8}))
.OUTPUT(y, TensorType({DT_INT8, DT_INT4}))
.REQUIRED_ATTR(scale, Float)
.REQUIRED_ATTR(offset, Float)
.ATTR(sqrt_mode, Bool, false)
.ATTR(round_mode, String, "Round")
.ATTR(dst_type, Int, DT_INT8)
.OP_END_FACTORY_REG(AscendQuant)

/**


+ 1
- 1
third_party/fwkacllib/inc/ops/ragged_array_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/ragged_conversion_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/ragged_math_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/random_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/reduce_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/resource_variable_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 3
- 3
third_party/fwkacllib/inc/ops/rnn.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -285,9 +285,9 @@ REG_OP(DynamicRNN)
*@li num_proj:An integer identifying the num projection in the op. Default to 0.
*@li time_major: A bool identifying the time major in the op. Defaults to true.
*@li activation: A string identifying the type of activation function in the op. Defaults to "tanh".
*Only tanh is currently supported.
*Support "tanh" and "clip".
*@li recurrent_activation: A string identifying the type of activation function in the op. Defaults to "sigmoid".
*Supprot "sigmoid" and "hard_sigmoid". In general, set "hard_sigmoid" for TF Keras LSTM.
*Support "sigmoid" and "hard_sigmoid". In general, set "hard_sigmoid" for TF Keras LSTM.
*@li forget_bias: A float identifying the forget bias in the op. Defaults to 0.
*@li gate_order: A string identifying the type of gate order in the op. Support "ijfo" and "ifco". Defaults to "ijfo".
*Set "ijfo" for the TF operator LSTM, and "ifco" for TF Keras LSTM.


+ 1
- 1
third_party/fwkacllib/inc/ops/rpn_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/save_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/sdca_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 57
- 19
third_party/fwkacllib/inc/ops/selection_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1895,6 +1895,33 @@ REG_OP(Cummin)
.REQUIRED_ATTR(axis, Int)
.OP_END_FACTORY_REG(Cummin)

/**
*@brief Returns a namedtuple (values, indices), where values is the cumulative
* maximum of the elements of input in the dimension dim,
* and indices is the index location of each maximum value found in the dimension dim. \n

*@par Inputs:
*One input, including:
* x: A tensor. Must be one of the following types:
* float16, float32, int32, uint32, int8, uint8. \n

*@par Attributes:
* dim: Axis along which to cummax. \n

*@par Outputs:
* @li y: A Tensor with the same type and shape as x.
* @li indices: A Tensor of type int32/int64 with the same shape as x. \n

*@par Third-party framework compatibility
*Compatible with the Pytorch operator Cummax. \n
*/
REG_OP(Cummax)
.INPUT(x, TensorType::BasicType())
.OUTPUT(y, TensorType::BasicType())
.OUTPUT(indices, TensorType::BasicType())
.REQUIRED_ATTR(dim, Int)
.OP_END_FACTORY_REG(Cummax)
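
A 1-D host-side sketch of the documented (values, indices) pair, i.e. the running maximum and the position where it was reached; names and types are illustrative:

// cummax_sketch.cc -- 1-D illustration of the documented (values, indices)
// outputs: the running maximum and the position where it was reached.
#include <cstddef>
#include <cstdint>
#include <vector>

void Cummax1D(const std::vector<float> &x, std::vector<float> &values,
              std::vector<int32_t> &indices) {
  values.resize(x.size());
  indices.resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    if (i == 0 || x[i] > values[i - 1]) {
      values[i] = x[i];
      indices[i] = static_cast<int32_t>(i);
    } else {
      values[i] = values[i - 1];   // carry the previous maximum forward
      indices[i] = indices[i - 1];
    }
  }
}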

/**
*@brief Extends the input with copies of data along a specified dimension. For example:
*(1) If x = [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]], with shape (2, 3, 2);
@@ -2129,24 +2156,6 @@ REG_OP(MaskedSelectV2)
.INPUT(mask, TensorType({DT_BOOL}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.OP_END_FACTORY_REG(MaskedSelectV2)
/**
* @brief Chooses the values of x according to mask.

* @par Inputs:
* two inputs, including:
* @li x: A Tensor of dtype is float16 or float32 or float64 or int64 or int32 or int16 or int8 or uint8.
* @li mask: A Tensor of dtype is bool. \n

* @par Outputs:
* @li y: A tensor with the same type as x. \n

*/
REG_OP(MaskedSelect)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
.INPUT(mask, TensorType({DT_BOOL}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
.OP_END_FACTORY_REG(MaskedSelect)

/**
* @brief update the value of X with value according to mask.
@@ -2364,6 +2373,35 @@ REG_OP(InplaceTopKDistance)
.INPUT(pq_ivf, TensorType({DT_INT32}))
.ATTR(order, String, "asc")
.OP_END_FACTORY_REG(InplaceTopKDistance)

/**
* @brief Re-sorts a set of already sorted data together with a new set of data and returns the first k entries. \n
*
* @par Inputs:
* @li sorted_distance: A sorted Tensor, which will be updated after calculation. Must be one of the following types: float16.
* @li pq_ivf: A Tensor of type int32, the index corresponding to sorted_distance.
* @li pq_index: A Tensor of type int32, the bucket number corresponding to sorted_distance. \n
*
*@par Outputs:
* @li topk_distance: A Tensor of type float16; the new data set is re-sorted with sorted_distance and updated into topk_distance.
* @li topk_ivf: A Tensor of type int32, the index corresponding to topk_distance.
* @li topk_index: A scalar of type int32, the bucket number corresponding to topk_distance. \n
*
* @par Attributes:
* k: get the first k data of sorted_distance. \n
*
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(TopKPQDistanceMerge)
.INPUT(sorted_distance, TensorType({DT_FLOAT16}))
.INPUT(pq_ivf, TensorType({DT_INT32}))
.INPUT(pq_index, TensorType({DT_INT32}))
.OUTPUT(topk_distance, TensorType({DT_FLOAT16}))
.OUTPUT(topk_ivf, TensorType({DT_INT32}))
.OUTPUT(topk_index, TensorType({DT_INT32}))
.REQUIRED_ATTR(k, Int)
.OP_END_FACTORY_REG(TopKPQDistanceMerge)
} // namespace ge

#endif // OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_
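
A host-side sketch of the merge described above: re-sort the (distance, ivf, index) triples and keep the first k; the function name and the ascending sort order are assumptions made for illustration:

// topk_pq_merge_sketch.cc -- host-side illustration: re-sort the
// (distance, ivf, index) triples ascending and keep the first k.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

void TopKMerge(const std::vector<float> &distance,
               const std::vector<int32_t> &ivf,
               const std::vector<int32_t> &index, std::size_t k,
               std::vector<float> &topk_distance,
               std::vector<int32_t> &topk_ivf,
               std::vector<int32_t> &topk_index) {
  std::vector<std::size_t> order(distance.size());
  for (std::size_t i = 0; i < order.size(); ++i) order[i] = i;
  std::sort(order.begin(), order.end(), [&](std::size_t a, std::size_t b) {
    return distance[a] < distance[b];
  });
  k = std::min(k, order.size());
  topk_distance.clear();
  topk_ivf.clear();
  topk_index.clear();
  for (std::size_t i = 0; i < k; ++i) {
    topk_distance.push_back(distance[order[i]]);
    topk_ivf.push_back(ivf[order[i]]);
    topk_index.push_back(index[order[i]]);
  }
}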

+ 1
- 1
third_party/fwkacllib/inc/ops/set_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 50
- 0
third_party/fwkacllib/inc/ops/slice_write_ops.h View File

@@ -0,0 +1,50 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file slice_write_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_SLICE_WRITE_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_SLICE_WRITE_OPS_H_

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Writes the tensor value into tensor x.
*@par Inputs:
*x: A Tensor of type float16/float/double/int32/int64. \n
*begin: A Tensor of type int32/int64. \n
*value: A Tensor of type float16/float/double/int32/int64.
*@par Outputs:
*x: the same tensor as the input x.
*/
REG_OP(SliceWrite)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, \
DT_INT32, DT_INT64}))
.INPUT(begin, TensorType({DT_INT32, DT_INT64}))
.INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, \
DT_INT32, DT_INT64}))
.OUTPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, \
DT_INT32, DT_INT64}))
.OP_END_FACTORY_REG(SliceWrite)

} // namespace ge


#endif // OPS_BUILT_IN_OP_PROTO_INC_SLICE_WRITE_OPS_H_
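
A 2-D host-side sketch of the documented in-place write of "value" into "x" at the offsets given by "begin"; the names, the float element type, and the absence of bounds checks are illustrative choices:

// slice_write_sketch.cc -- 2-D illustration of writing "value" into "x"
// starting at the offsets in "begin"; x is updated in place, bounds checks
// are omitted for brevity.
#include <cstddef>
#include <cstdint>
#include <vector>

void SliceWrite2D(std::vector<float> &x, std::size_t x_cols,
                  const std::vector<float> &value, std::size_t v_rows,
                  std::size_t v_cols, int64_t begin_row, int64_t begin_col) {
  const std::size_t r0 = static_cast<std::size_t>(begin_row);
  const std::size_t c0 = static_cast<std::size_t>(begin_col);
  for (std::size_t r = 0; r < v_rows; ++r) {
    for (std::size_t c = 0; c < v_cols; ++c) {
      x[(r0 + r) * x_cols + (c0 + c)] = value[r * v_cols + c];
    }
  }
}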

+ 1
- 1
third_party/fwkacllib/inc/ops/sparse_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/spectral_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/split_combination_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/state_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/stateful_random_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/stateless_random_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/string_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/swap_co_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/ops/target_crop_and_resize.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 27
- 1
third_party/fwkacllib/inc/ops/transformation_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -153,6 +153,32 @@ REG_OP(TransData)
.ATTR(groups, Int, 1)
.OP_END_FACTORY_REG(TransData)

/**
*@brief Does format transfer for various data formats. Only supports "ND" to
"ND_RNN_BIAS" and "ND" to "FRACTAL_ZN_RNN".

*@par Inputs:
*src: A Tensor. For all branches the type can be: float16, float32, int32, int8, bool.
* For branches without padding the type can also be: int16, int64, uint8, uint16, uint32, uint64 . \n

*@par Attributes:
*@li src_format: A string, the source data format, can be "ND", "ND_RNN_BIAS", "FRACTAL_ZN_RNN" etc.
*@li dst_format: A string, the target data format, can be "ND", "ND_RNN_BIAS", "FRACTAL_ZN_RNN" etc.
*@li input_size: An int32, the input size.
*@li hidden_size: An int32, the hidden size.

*@par Outputs:
*dst: A Tensor. Has the same type as "src".
*/
REG_OP(TransDataRNN)
.INPUT(src, TensorType::BasicType())
.OUTPUT(dst, TensorType::BasicType())
.REQUIRED_ATTR(src_format, String)
.REQUIRED_ATTR(dst_format, String)
.REQUIRED_ATTR(input_size, Int)
.REQUIRED_ATTR(hidden_size, Int)
.OP_END_FACTORY_REG(TransDataRNN)

/**
*@brief Permutes the dimensions according to order.
The returned tensor's dimension i will correspond to the input dimension order[i] . \n


+ 1
- 1
third_party/fwkacllib/inc/ops/warp_perspective_ops.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
third_party/fwkacllib/inc/opt_info/opt_info.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 90
- 6
third_party/fwkacllib/inc/runtime/base.h View File

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/

#ifndef __CCE_RUNTIME_BASE_H__
#define __CCE_RUNTIME_BASE_H__
@@ -94,6 +94,14 @@ typedef void (*rtTaskFailCallback)(rtExceptionInfo *exceptionInfo);

typedef void (*rtDeviceStateCallback)(uint32_t devId, bool isOpen);

/**
* @ingroup profiling_base
* @brief dataType: rtProfCtrlType_t
* @brief data: data switch or reporter function
* @brief dataLen: length of data
*/
typedef rtError_t (*rtProfCtrlHandle)(uint32_t dataType, void *data, uint32_t dataLen);

/**
* @ingroup dvrt_base
* @brief stream handle.
@@ -118,6 +126,32 @@ typedef void *rtLabel_t;
*/
typedef void *rtModel_t;

#define RT_PROF_MAX_DEV_NUM 64

/**
* @ingroup profiling_base
* @brief profiling command info
*/
typedef struct rtProfCommandHandle {
uint64_t profSwitch;
uint64_t profSwitchHi;
uint32_t devNums;
uint32_t devIdList[RT_PROF_MAX_DEV_NUM];
uint32_t modelId;
uint32_t type;
} rtProfCommandHandle_t;

/**
* @ingroup profiling_base
* @brief type of app register profiling switch or reporter callback
*/
typedef enum {
RT_PROF_CTRL_INVALID = 0,
RT_PROF_CTRL_SWITCH,
RT_PROF_CTRL_REPORTER,
RT_PROF_CTRL_BUTT
} rtProfCtrlType_t;

/**
* @ingroup profiling_base
* @brief runtime handle.
@@ -166,6 +200,57 @@ RTS_API rtError_t rtProfilerTraceEx(uint64_t id, uint64_t modelId, uint16_t tagI
*/
RTS_API rtError_t rtSetMsprofReporterCallback(MsprofReporterCallback callback);

/**
* @ingroup profiling_base
* @brief add the map of deviceId and GE model index, called by ge
* @param [in] geModelIdx The index of GE model
* @param [in] deviceId The id of device
* @return RT_ERROR_NONE for ok
* @return ACL_ERROR_RT_PARAM_INVALID for error input
*/
RTS_API rtError_t rtSetDeviceIdByGeModelIdx(uint32_t geModelIdx, uint32_t deviceId);

/**
* @ingroup profiling_base
* @brief del the map of deviceId and GE model index, called by ge
* @param [in] geModelIdx The index of GE model
* @param [in] deviceId The id of device
* @return RT_ERROR_NONE for ok
* @return ACL_ERROR_RT_PARAM_INVALID for error input
*/
RTS_API rtError_t rtUnsetDeviceIdByGeModelIdx(uint32_t geModelIdx, uint32_t deviceId);

/**
* @ingroup profiling_base
* @brief find deviceId by GE model index, called by profiling
* @param [in] geModelIdx The index of GE model
* @param [out] deviceId The id of device
* @return RT_ERROR_NONE for ok
* @return ACL_ERROR_RT_PARAM_INVALID for error input
* @return ACL_ERROR_RT_INTERNAL_ERROR for can't find deviceId by geModelIdx
*/
RTS_API rtError_t rtGetDeviceIdByGeModelIdx(uint32_t geModelIdx, uint32_t *deviceId);

/**
* @ingroup profiling_base
* @brief set profiling switch, called by profiling
* @param [in] data rtProfCommandHandle
* @param [in] len length of data
* @return RT_ERROR_NONE for ok
* @return ACL_ERROR_RT_PARAM_INVALID for error input
*/
RTS_API rtError_t rtProfSetProSwitch(void *data, uint32_t len);

/**
* @ingroup profiling_base
* @brief register callback of upper app, called by ge or acl
* @param [in] moduleId module id of the upper APP
* @param [in] callback callback function invoked when the switch or reporter changes
* @return RT_ERROR_NONE for ok
* @return ACL_ERROR_RT_PARAM_INVALID for error input
*/
RTS_API rtError_t rtProfRegisterCtrlCallback(uint32_t moduleId, rtProfCtrlHandle callback);
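
To show how the pieces above fit together, a hedged sketch of registering a profiling control callback; the include path, module id, and handler body are assumptions, while the types and functions used (rtProfCtrlHandle, rtProfCommandHandle_t, RT_PROF_CTRL_SWITCH, rtProfRegisterCtrlCallback) are the ones declared in this header:

// prof_callback_sketch.cc -- illustrative only; include path, module id and
// handler logic are assumptions, not taken from the header.
#include "runtime/base.h"

static rtError_t MyProfCtrlHandler(uint32_t dataType, void *data, uint32_t dataLen) {
  if (dataType == RT_PROF_CTRL_SWITCH && data != nullptr &&
      dataLen >= sizeof(rtProfCommandHandle_t)) {
    const rtProfCommandHandle_t *cmd = static_cast<const rtProfCommandHandle_t *>(data);
    (void)cmd;  // e.g. react to cmd->profSwitch and cmd->devIdList here
  }
  return RT_ERROR_NONE;  // success code named by the comments above
}

rtError_t RegisterMyProfCallback(uint32_t moduleId) {
  // moduleId identifies the upper app (GE or ACL); the value is caller-defined.
  return rtProfRegisterCtrlCallback(moduleId, MyProfCtrlHandler);
}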

/**
* @ingroup dvrt_base
* @brief Returns the last error from a runtime call.
@@ -356,7 +441,6 @@ RTS_API rtError_t rtLabelCreateExV2(rtLabel_t *label, rtModel_t model, rtStream_
* @return RT_ERROR_INVALID_VALUE for input null ptr
*/
RTS_API rtError_t rtGetTaskIdAndStreamID(uint32_t *taskId, uint32_t *streamId);

#if defined(__cplusplus)
}
#endif


+ 5
- 5
third_party/fwkacllib/inc/runtime/config.h View File

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/

#ifndef __CCE_RUNTIME_CONFIG_H__
#define __CCE_RUNTIME_CONFIG_H__


+ 5
- 5
third_party/fwkacllib/inc/runtime/context.h View File

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/

#ifndef __CCE_RUNTIME_CONTEXT_H__
#define __CCE_RUNTIME_CONTEXT_H__


+ 5
- 5
third_party/fwkacllib/inc/runtime/dev.h View File

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/

#ifndef __CCE_RUNTIME_DEVICE_H__
#define __CCE_RUNTIME_DEVICE_H__


+ 5
- 5
third_party/fwkacllib/inc/runtime/dvfsprofile.h View File

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/

#ifndef __CCE_RUNTIME_DVFSPROFILE_H__
#define __CCE_RUNTIME_DVFSPROFILE_H__


+ 5
- 5
third_party/fwkacllib/inc/runtime/event.h View File

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/

#ifndef __CCE_RUNTIME_EVENT_H__
#define __CCE_RUNTIME_EVENT_H__


Some files were not shown because too many files changed in this diff
