Merge pull request !2129 from yanghaoran/r1.8
@@ -77,6 +77,8 @@ const char_t *const GRAPH_MEMORY_MAX_SIZE = "ge.graphMemoryMaxSize";
 const char_t *const VARIABLE_MEMORY_MAX_SIZE = "ge.variableMemoryMaxSize";
 const char_t *const OPTION_EXEC_REUSE_ZERO_COPY_MEMORY = "ge.exec.reuseZeroCopyMemory";
+const std::string ATOMIC_CLEAN_POLICY = "ge.exec.atomicCleanPolicy";
 const char_t *const OPTION_EXEC_LOGICAL_DEVICE_CLUSTER_DEPLOY_MODE = "ge.exec.logicalDeviceClusterDeployMode";
 const char_t *const OPTION_EXEC_LOGICAL_DEVICE_ID = "ge.exec.logicalDeviceId";
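The new ATOMIC_CLEAN_POLICY key is consumed like the neighboring options: through the string-to-string options map handed to GE at initialization. A minimal sketch, assuming the std::map<std::string, std::string> overload of ge::GEInitialize from ge_api.h; the value "1" (disable centralized cleaning of atomic memory, versus "0" for the centralized default) is taken from Ascend's documented policy switch and should be verified against the option documentation:

```cpp
// Sketch: pass the atomic-clean policy to GE at initialization time.
// Assumes ge::GEInitialize / ge::GEFinalize from ge_api.h; the value
// "1" is an assumption per the documented policy switch, not this diff.
#include <map>
#include <string>
#include "ge/ge_api.h"

int main() {
  std::map<std::string, std::string> options;
  options["ge.exec.atomicCleanPolicy"] = "1";  // assumed: "1" = no centralized clean
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;  // initialization failed
  }
  // ... build and run graphs ...
  ge::GEFinalize();
  return 0;
}
```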
@@ -1 +1 @@
-Subproject commit 175dce710e744666c6204540857634f362aafd61
+Subproject commit e4d1efc47349f13af1bcdb53ba408118779fc27e
@@ -1160,27 +1160,27 @@ REG_OP(MirrorPadGrad)
     .OP_END_FACTORY_REG(MirrorPadGrad)
 
 /**
-*@brief Returns locations of nonzero / true values in a tensor. \n
+* @brief Returns locations of nonzero / true values in a tensor. \n
 
-*@par Inputs:
-*Including:
-*x: A Tensor. Must be one of the following types:
-DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16,
-DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL. \n
+* @par Inputs:
+* Including:
+* @li x: A Tensor. Must be one of the following types:
+DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_QINT8,
+DT_QUINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32, DT_QINT32,
+DT_INT64, DT_UINT64, DT_BOOL, DT_COMPLEX64, DT_COMPLEX128 \n
 
-*@par Outputs:
-*y: A Tensor of type DT_INT64. \n
+* @par Outputs:
+* @li y: A Tensor of type DT_INT64. \n
 
-*@attention Constraints:
-*Where runs on the Ascend AI CPU, which delivers poor performance.\n
+* @attention Constraints:
+* Where runs on the Ascend AI CPU, which delivers poor performance.\n
 
-*@par Third-party framework compatibility
-*Compatible with the TensorFlow operator Where.
+* @par Third-party framework compatibility
+* Compatible with the TensorFlow operator Where.
 */
 REG_OP(Where)
-    .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
-                          DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
+    .INPUT(x, TensorType({BasicType(), DT_BOOL}))
     .OUTPUT(y, TensorType({DT_INT64}))
     .OP_END_FACTORY_REG(Where)
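The widened TensorType lets Where accept quantized and complex inputs in addition to the previous list; the output contract is unchanged: an (N x rank) DT_INT64 matrix holding the coordinates of the nonzero / true elements. A minimal reference sketch of that semantics for a rank-2 input, independent of the GE runtime:

```cpp
// Reference semantics of Where for a rank-2 float input: collect the
// (row, col) coordinates of every nonzero element, in row-major order,
// as int64 values -- matching the op's DT_INT64 output tensor.
#include <array>
#include <cstdint>
#include <vector>

std::vector<std::array<int64_t, 2>> WhereRef(const std::vector<std::vector<float>> &x) {
  std::vector<std::array<int64_t, 2>> y;
  for (size_t i = 0; i < x.size(); ++i) {
    for (size_t j = 0; j < x[i].size(); ++j) {
      if (x[i][j] != 0.0f) {
        y.push_back({static_cast<int64_t>(i), static_cast<int64_t>(j)});
      }
    }
  }
  return y;  // shape: [number_of_nonzeros, 2]
}
```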
@@ -1815,6 +1815,60 @@ REG_OP(SwinAttentionScore)
     .ATTR(bmm_score_transpose_b, Bool, false)
     .ATTR(softmax_axes, ListInt, {})
     .OP_END_FACTORY_REG(SwinAttentionScore)
+
+/**
+* @brief Uses "updates" to update tensor "data" by "indices". \n
+
+* @par Inputs:
+* Three inputs, including:
+* @li var: A Tensor of type BasicType.
+* @li indices: An ND Tensor of type int32 or int64.
+* @li updates: A Tensor with the same dtype as 'var'. Same shape as indices. \n
+
+* @par Attributes:
+* use_locking: An optional bool. Defaults to "False". If "True",
+* the operation will be protected by a lock. \n
+
+* @par Outputs:
+* var: A Tensor. Has the same type and format as input "var". \n
+
+* @par Third-party framework compatibility
+* Compatible with the TensorFlow operator ScatterNdMax.
+*/
+REG_OP(ScatterNdMax)
+    .INPUT(var, TensorType::BasicType())
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType::BasicType())
+    .OUTPUT(var, TensorType::BasicType())
+    .ATTR(use_locking, Bool, false)
+    .OP_END_FACTORY_REG(ScatterNdMax)
+
+/**
+* @brief Uses "updates" to update tensor "data" by "indices". \n
+
+* @par Inputs:
+* Three inputs, including:
+* @li var: A Tensor of type BasicType.
+* @li indices: An ND Tensor of type int32 or int64.
+* @li updates: A Tensor with the same dtype as 'var'. Same shape as indices. \n
+
+* @par Attributes:
+* use_locking: An optional bool. Defaults to "False". If "True",
+* the operation will be protected by a lock. \n
+
+* @par Outputs:
+* var: A Tensor. Has the same type and format as input "var". \n
+
+* @par Third-party framework compatibility
+* Compatible with the TensorFlow operator ScatterNdMin.
+*/
+REG_OP(ScatterNdMin)
+    .INPUT(var, TensorType::BasicType())
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType::BasicType())
+    .OUTPUT(var, TensorType::BasicType())
+    .ATTR(use_locking, Bool, false)
+    .OP_END_FACTORY_REG(ScatterNdMin)
 
 } // namespace ge
 
 #endif // OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_
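Both new ops follow the scatter-nd pattern with an extremum combine rule. A minimal sketch of that rule for the simplest case (1-D var, element-level indices); the helper below is illustrative only and elides the general N-D slice indexing:

```cpp
// Reference combine rule of ScatterNdMax for a 1-D "var" whose indices
// address individual elements: each update is merged into var with
// std::max. ScatterNdMin is identical with std::min. This sketches the
// element-wise semantics only, not the operator's full N-D behavior.
#include <algorithm>
#include <cstdint>
#include <vector>

void ScatterNdMaxRef(std::vector<float> &var,
                     const std::vector<int64_t> &indices,
                     const std::vector<float> &updates) {
  for (size_t i = 0; i < indices.size(); ++i) {
    float &slot = var[static_cast<size_t>(indices[i])];
    slot = std::max(slot, updates[i]);  // max-combine the update into var
  }
}
```

Because max and min are commutative and associative, duplicate indices yield a deterministic result regardless of visitation order, unlike plain scatter-update ops where duplicates depend on ordering.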
@@ -170,7 +170,7 @@ MSVP_PROF_API int32_t MsprofInit(uint32_t moduleId, void *data, uint32_t dataLen
  * @param moduleId [IN] module Id
  * @param handle [IN] the pointer of callback
  */
-MSVP_PROF_API int32_t MsprofRegisterCallback(uint32_t moduleId, ProfCommandHandle handle);
+MSVP_PROF_API int32_t MsprofRegisterCallback(uint32_t moduleId, ProfCommandHandle callback);
 
 /*
  * @name profReportData
  * @brief start reporter/stop reporter/report date
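The rename from handle to callback is declaration-only; it changes neither callers nor the ABI. A registration sketch follows; the ProfCommandHandle signature below is an assumption modeled on prof_callback.h and must be checked against the actual CANN headers:

```cpp
// Registering a profiling command callback. The MsprofRegisterCallback
// declaration mirrors the header above; the ProfCommandHandle alias is
// an ASSUMED signature (type, data, len) -- verify against
// prof_callback.h before relying on it.
#include <cstdint>

using ProfCommandHandle = int32_t (*)(uint32_t type, void *data, uint32_t len);  // assumed
extern int32_t MsprofRegisterCallback(uint32_t moduleId, ProfCommandHandle callback);

static int32_t OnProfCommand(uint32_t type, void *data, uint32_t len) {
  (void)type; (void)data; (void)len;  // dispatch on the command type here
  return 0;                           // 0 conventionally means "handled"
}

int32_t RegisterProfCallback(uint32_t module_id) {
  return MsprofRegisterCallback(module_id, OnProfCommand);
}
```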