Browse Source

!1063 Fix bug in AICPU all_shape compilation.

From: @zhao_zhixuan
Reviewed-by: @xchu42,@ji_chen
Signed-off-by: @ji_chen
tags/v1.2.0
mindspore-ci-bot Gitee 4 years ago
parent
commit
b4206b9fe3
14 changed files with 55 additions and 242 deletions
  1. +0
    -2
      ge/CMakeLists.txt
  2. +0
    -1
      ge/ge_inference.mk
  3. +0
    -1
      ge/ge_runner.mk
  4. +2
    -29
      ge/generator/ge_generator.cc
  5. +23
    -0
      ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
  6. +18
    -0
      ge/graph/load/model_manager/task_info/kernel_task_info.cc
  7. +1
    -13
      ge/graph/manager/graph_manager.cc
  8. +0
    -155
      ge/graph/passes/dynamic_single_op_reset_shape_pass.cc
  9. +0
    -36
      ge/graph/passes/dynamic_single_op_reset_shape_pass.h
  10. +1
    -1
      ge/hybrid/model/node_item.cc
  11. +8
    -3
      ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
  12. +0
    -1
      tests/ut/ge/CMakeLists.txt
  13. +1
    -0
      tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
  14. +1
    -0
      tests/ut/ge/graph/load/kernel_task_info_unittest.cc

+ 0
- 2
ge/CMakeLists.txt View File

@@ -189,7 +189,6 @@ set(TRAIN_SRC_LIST
"graph/passes/atomic_addr_clean_pass.cc"
"graph/passes/mark_same_addr_pass.cc"
"graph/passes/mark_graph_unknown_status_pass.cc"
"graph/passes/dynamic_single_op_reset_shape_pass.cc"
"graph/passes/mark_agnostic_pass.cc"
"graph/partition/dynamic_shape_partition.cc"
"graph/partition/stage_partition.cc"
@@ -496,7 +495,6 @@ set(INFER_SRC_LIST
"graph/passes/atomic_addr_clean_pass.cc"
"graph/passes/mark_same_addr_pass.cc"
"graph/passes/mark_graph_unknown_status_pass.cc"
"graph/passes/dynamic_single_op_reset_shape_pass.cc"
"graph/passes/mark_agnostic_pass.cc"
"graph/common/omg_util.cc"
"graph/common/bcast.cc"


+ 0
- 1
ge/ge_inference.mk View File

@@ -111,7 +111,6 @@ OMG_HOST_SRC_FILES := \
graph/passes/atomic_addr_clean_pass.cc \
graph/passes/mark_same_addr_pass.cc \
graph/passes/mark_graph_unknown_status_pass.cc \
graph/passes/dynamic_single_op_reset_shape_pass.cc \
graph/passes/mark_agnostic_pass.cc \
graph/common/omg_util.cc \
graph/common/bcast.cc \


+ 0
- 1
ge/ge_runner.mk View File

@@ -114,7 +114,6 @@ LIBGE_LOCAL_SRC_FILES := \
graph/passes/atomic_addr_clean_pass.cc \
graph/passes/mark_same_addr_pass.cc \
graph/passes/mark_graph_unknown_status_pass.cc \
graph/passes/dynamic_single_op_reset_shape_pass.cc \
graph/passes/mark_agnostic_pass.cc \
graph/partition/dynamic_shape_partition.cc \
graph/partition/stage_partition.cc \


+ 2
- 29
ge/generator/ge_generator.cc View File

@@ -48,7 +48,6 @@ const char *const kVectorEngine = "VectorEngine";
const char *const kAIcoreEngine = "AIcoreEngine";
const char *const kFileNameSuffix = "online";
const char *const kAicpuAllshape = "_AllShape";
const size_t kDynamicDimSize = 1;
const int64_t kDynamicDimValue = -2;

std::map<ge::OpEngineType, std::string> engine_type_map{
@@ -251,30 +250,6 @@ static void GetOpsProtoPath(string &opsproto_path) {
opsproto_path = (path_base + "ops/op_proto/custom/" + ":") + (path_base + "ops/op_proto/built-in/");
}

// Checks whether any input or output tensor of |op_desc| carries the
// unknown-rank dynamic shape (a single dim of value -2), reported through
// |change_shape_flag|. Returns PARAM_INVALID when op_desc is null, SUCCESS
// otherwise.
static Status CheckShapeReset(const OpDescPtr &op_desc, bool &change_shape_flag) {
  GE_CHECK_NOTNULL_EXEC(op_desc, return PARAM_INVALID);
  change_shape_flag = false;
  for (size_t i = 0; i < op_desc->GetAllInputsDesc().size(); i++) {
    auto input_desc = op_desc->MutableInputDesc(static_cast<uint32_t>(i));
    GE_CHECK_NOTNULL(input_desc);
    // A one-element shape of [-2] marks an unknown-rank (dynamic) tensor;
    // scalar tensors (empty dims) never match.
    auto dims = input_desc->GetShape().GetDims();
    if (dims.size() == kDynamicDimSize && dims[0] == kDynamicDimValue) {
      change_shape_flag = true;
      // One dynamic tensor is enough to answer; stop scanning early.
      return SUCCESS;
    }
  }
  for (size_t i = 0; i < op_desc->GetAllOutputsDesc().size(); i++) {
    auto output_desc = op_desc->MutableOutputDesc(static_cast<uint32_t>(i));
    GE_CHECK_NOTNULL(output_desc);
    auto dims = output_desc->GetShape().GetDims();
    if (dims.size() == kDynamicDimSize && dims[0] == kDynamicDimValue) {
      change_shape_flag = true;
      return SUCCESS;
    }
  }
  return SUCCESS;
}

static Status ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTensor> &inputs_dynamic) {
for (auto input : inputs) {
auto input_desc = input.GetTensorDesc();
@@ -289,7 +264,7 @@ static Status ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTenso

bool is_const = false;
(void)AttrUtils::GetBool(input_desc, CONST_ATTR_NAME_INPUT, is_const);
if (!is_const && shape_ori.GetDims().size() > 0) {
if (!is_const) {
int64_t storage_format = FORMAT_NCHW;
if (ge::AttrUtils::GetInt(desc, ge::ATTR_NAME_STORAGE_FORMAT, storage_format) &&
!ge::AttrUtils::SetListInt(desc, ge::ATTR_NAME_STORAGE_SHAPE, dynamic_shape_dims)) {
@@ -723,10 +698,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
GELOGD("The opType in op_desc_tmp is [%s]", op_desc_tmp->GetType().c_str());

bool all_shape = false;
bool dynamic_flag = false;
(void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape);
CheckShapeReset(op_desc, dynamic_flag);
if (dynamic_flag || all_shape) {
if (all_shape) {
GELOGD("Get aicpu all_shape kernel!");
vector<GeTensor> inputs_dynamic;
vector<GeTensor> outputs_dynamic;


+ 23
- 0
ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc View File

@@ -29,6 +29,10 @@
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "framework/common/debug/log.h"

namespace {
const char *const kAicpuAllshape = "_AllShape";
} // namespace

namespace ge {
Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc) {
if (ext_info.empty()) {
@@ -50,6 +54,25 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
GELOGD("Update aicpu_task ext_info bit_map execute mode to 1.");

bool all_shape = false;
(void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape);
if (all_shape) {
GELOGD("Aicpu all_shape kernel need to update io shape.");
for (uint32_t i = 0; i < num_inputs; i++) {
auto input_desc = op_desc->MutableInputDesc(i);
GE_CHECK_NOTNULL(input_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateInputShapeAndType(i, *input_desc),
"Input[%u] update input shape failed.", i);
}
if (unknown_type != DEPEND_COMPUTE) {
for (uint32_t j = 0; j < num_outputs; j++) {
auto output_desc = op_desc->MutableOutputDesc(j);
GE_CHECK_NOTNULL(output_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateOutputShapeAndType(j, *output_desc),
"Output[%u] update output shape failed.", j);
}
}
}
auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());


+ 18
- 0
ge/graph/load/model_manager/task_info/kernel_task_info.cc View File

@@ -43,6 +43,7 @@ constexpr int64_t kInvalidGroupKey = -1;
constexpr uint32_t kSKTSingleSize = 1;
const char *kIsLastNode = "is_last_node";
const char *kIsFirstNode = "is_first_node";
const char *const kAicpuAllshape = "_AllShape";
const int64_t kCloseSkt = 100;
const uint32_t kAddrLen = sizeof(void *);
const int kBaseInt = 10;
@@ -985,6 +986,23 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
GELOGD("Update aicpu_task ext_info bit_map execute mode to 1.");

bool all_shape = false;
(void)AttrUtils::GetBool(op_desc_, kAicpuAllshape, all_shape);
if (all_shape) {
GELOGD("Aicpu all_shape kernel need to update io shape.");
for (uint32_t i = 0; i < num_inputs; i++) {
auto input_desc = op_desc_->MutableInputDesc(i);
GE_CHECK_NOTNULL(input_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateInputShapeAndType(i, *input_desc),
"Input[%u] update input shape failed.", i);
}
for (uint32_t j = 0; j < num_outputs; j++) {
auto output_desc = op_desc_->MutableOutputDesc(j);
GE_CHECK_NOTNULL(output_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateOutputShapeAndType(j, *output_desc),
"Output[%u] update output shape failed.", j);
}
}
auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());


+ 1
- 13
ge/graph/manager/graph_manager.cc View File

@@ -59,7 +59,6 @@
#include "graph/passes/iterator_op_pass.h"
#include "graph/passes/link_gen_mask_nodes_pass.h"
#include "graph/passes/mark_graph_unknown_status_pass.h"
#include "graph/passes/dynamic_single_op_reset_shape_pass.h"
#include "graph/passes/merge_pass.h"
#include "graph/passes/merge_input_memcpy_pass.h"
#include "graph/passes/merge_to_stream_merge_pass.h"
@@ -643,22 +642,11 @@ Status GraphManager::ReplaceSubgraphWithOriGraph(const ComputeGraphPtr &compute_

Status GraphManager::SetSubgraph(uint64_t session_id, ComputeGraphPtr compute_graph, GraphPartitioner &partitioner) {
GE_CHECK_NOTNULL(compute_graph);
PassManager pass_for_dynamic_shape_reset_optimize;
GE_CHK_STATUS_RET(pass_for_dynamic_shape_reset_optimize.AddPass(
"SetSubgraph::AfterSetSubgraph::DynamicSingleOpResetShapePass", new (std::nothrow) DynamicSingleOpResetShapePass))
GE_TIMESTAMP_START(pass_for_dynamic_shape_reset_optimize);
Status ret = pass_for_dynamic_shape_reset_optimize.Run(compute_graph);
GE_TIMESTAMP_END(pass_for_dynamic_shape_reset_optimize, "SetSubgraph::AfterSetSubgraph");
if (ret != SUCCESS && ret != NOT_CHANGED) {
GELOGE(ret, "Run passes when optimize subgraph failed");
return ret;
}

auto sub_graph_map = partitioner.GetSubGraphMap();
GELOGD("Directly optimize subgraph with build mode:%s, and step:%s.",
options_.build_mode.c_str(),
options_.build_step.c_str());
ret = OptimizeSubGraphWithMultiThreads(compute_graph, sub_graph_map, session_id);
Status ret = OptimizeSubGraphWithMultiThreads(compute_graph, sub_graph_map, session_id);
if (ret != SUCCESS) {
GELOGE(ret, "Multiply optimize subgraph failed");
return ret;


+ 0
- 155
ge/graph/passes/dynamic_single_op_reset_shape_pass.cc View File

@@ -1,155 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "graph/passes/dynamic_single_op_reset_shape_pass.h"
#include "common/ge_inner_error_codes.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/type_utils.h"
#include "graph/debug/ge_attr_define.h"

namespace ge {
namespace {
const int64_t kDynamicShapeDim = -2;
const char *const kEngineNameAiCpu = "DNN_VM_AICPU_ASCEND";
const char *const kEngineNameAiCpuTf = "DNN_VM_AICPU";
} // namespace
// Resets the tensor shapes of dynamic single-op aicpu nodes to the
// unknown-rank shape ([-2]) so they can be compiled as all-shape kernels.
// The pass is a no-op (returns SUCCESS) when the graph contains any node
// assigned to a non-aicpu engine.
Status DynamicSingleOpResetShapePass::Run(ComputeGraphPtr graph) {
  GE_CHECK_NOTNULL(graph);

  std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  if (instance == nullptr || !instance->InitFlag()) {
    // Log text previously referred to CompileNodesPass (copy-paste error).
    GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "Run DynamicSingleOpResetShapePass failed: GE is not initialized.");
    return ge::GE_CLI_GE_NOT_INITIALIZED;
  }

  // Skip the whole pass when the graph holds any non-aicpu compute node.
  bool is_not_aicpu = false;
  if (CheckAllAicpuNodes(graph, is_not_aicpu) != SUCCESS) {
    GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "Check whether graph has non-aicpu nodes failed.");
    return ge::GE_CLI_GE_NOT_INITIALIZED;
  }
  if (is_not_aicpu) {
    GELOGI("The graph [%s] contains non-aicpu nodes, shapes of its aicpu nodes will not be reset.",
           graph->GetName().c_str());
    return SUCCESS;
  }

  for (const auto &node : graph->GetDirectNode()) {
    GE_CHECK_NOTNULL(node->GetOpDesc());
    // Skip graph inputs/outputs and constants.
    if (node->GetType() == DATA || node->GetType() == CONSTANT || node->GetType() == CONSTANTOP ||
        node->GetType() == NETOUTPUT) {
      continue;
    }

    // Only nodes flagged with ATTR_SINGLE_OP_SCENE are reset.
    bool single_aicpu_unknown = false;
    if (!AttrUtils::GetBool(node->GetOpDesc(), ATTR_SINGLE_OP_SCENE, single_aicpu_unknown) ||
        !single_aicpu_unknown) {
      continue;
    }

    // Reset the aicpu node's shapes to the unknown-rank shape.
    auto op_desc = node->GetOpDesc();
    if (ResetOpShape(op_desc) != SUCCESS) {
      // Fixed typo in log text ("shapr" -> "shape").
      GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "Reset node[%s] dynamic shape failed.", node->GetName().c_str());
      return ge::GE_CLI_GE_NOT_INITIALIZED;
    }
    GELOGD("Reset dynamic aicpu node [%s] shape success!", node->GetName().c_str());
  }

  GELOGD("Reset dynamic aicpu nodes shape of graph [%s] success!", graph->GetName().c_str());
  return SUCCESS;
}

// Scans all direct compute nodes of |graph| and reports, via |is_not_aicpu|,
// whether any of them is assigned to an engine other than aicpu
// (DNN_VM_AICPU_ASCEND / DNN_VM_AICPU). Data, const and netoutput nodes are
// ignored. Returns GRAPH_FAILED when a compute node has no engine assigned.
Status DynamicSingleOpResetShapePass::CheckAllAicpuNodes(const ComputeGraphPtr &graph, bool &is_not_aicpu) {
is_not_aicpu = false;
for (const auto &node : graph->GetDirectNode()) {
GE_CHECK_NOTNULL(node->GetOpDesc());
// skip graph input/output and constant nodes
if (node->GetType() == DATA || node->GetType() == CONSTANT || node->GetType() == CONSTANTOP ||
node->GetType() == NETOUTPUT) {
continue;
}

// check which engine this node was assigned to
auto op_desc = node->GetOpDesc();
string engine_name = op_desc->GetOpEngineName();
if (engine_name.empty()) {
GELOGE(GRAPH_FAILED, "Get engine failed of node[%s].", node->GetName().c_str());
return GRAPH_FAILED;
}
// one non-aicpu node is enough to answer; stop scanning early
if (engine_name != kEngineNameAiCpu && engine_name != kEngineNameAiCpuTf) {
is_not_aicpu = true;
return SUCCESS;
}
}
return SUCCESS;
}

// Returns true when the tensor desc carries the const-input attribute
// (CONST_ATTR_NAME_INPUT); absent attribute means non-const.
bool DynamicSingleOpResetShapePass::CheckIfConstInput(const GeTensorDescPtr &input_tensor_desc) {
  bool const_flag = false;
  (void)AttrUtils::GetBool(input_tensor_desc, CONST_ATTR_NAME_INPUT, const_flag);
  return const_flag;
}

// Resets every eligible input and output tensor shape of |op_desc| to the
// unknown-rank shape ([-2]); scalar and const tensors are left unchanged by
// the helpers.
Status DynamicSingleOpResetShapePass::ResetOpShape(OpDescPtr &op_desc) {
  GE_CHECK_NOTNULL(op_desc);
  const std::vector<int64_t> unknown_rank_dims = {kDynamicShapeDim};
  const GeShape unknown_rank_shape(unknown_rank_dims);
  (void)ResetInputTensorShape(op_desc, unknown_rank_shape);
  (void)ResetOutputTensorShape(op_desc, unknown_rank_shape);
  return SUCCESS;
}

// Sets every non-scalar, non-const input tensor of |op_desc| to
// |dynamic_shape|. Scalars (empty dims) and const inputs keep their shape.
Status DynamicSingleOpResetShapePass::ResetInputTensorShape(OpDescPtr &op_desc,
                                                            const GeShape &dynamic_shape) {
  GE_CHECK_NOTNULL(op_desc);
  const size_t input_num = op_desc->GetAllInputsDesc().size();
  for (size_t idx = 0; idx < input_num; ++idx) {
    auto tensor_desc = op_desc->MutableInputDesc(static_cast<uint32_t>(idx));
    GE_CHECK_NOTNULL(tensor_desc);
    // Leave scalar and const inputs untouched.
    if (tensor_desc->GetShape().GetDims().empty() || CheckIfConstInput(tensor_desc)) {
      continue;
    }
    tensor_desc->SetShape(dynamic_shape);
  }
  return SUCCESS;
}

// Sets every non-scalar output tensor of |op_desc| to |dynamic_shape|.
// Scalar outputs (empty dims) keep their original shape.
Status DynamicSingleOpResetShapePass::ResetOutputTensorShape(OpDescPtr &op_desc, const GeShape &dynamic_shape) {
  GE_CHECK_NOTNULL(op_desc);
  const size_t output_num = op_desc->GetAllOutputsDesc().size();
  for (size_t idx = 0; idx < output_num; ++idx) {
    auto tensor_desc = op_desc->MutableOutputDesc(static_cast<uint32_t>(idx));
    GE_CHECK_NOTNULL(tensor_desc);
    // Leave scalar outputs untouched.
    if (tensor_desc->GetShape().GetDims().empty()) {
      continue;
    }
    tensor_desc->SetShape(dynamic_shape);
  }
  return SUCCESS;
}
} // namespace ge

+ 0
- 36
ge/graph/passes/dynamic_single_op_reset_shape_pass.h View File

@@ -1,36 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef GE_GRAPH_PASSES_DYNAMIC_SINGLE_OP_RESET_SHAPE_PASS_H_
#define GE_GRAPH_PASSES_DYNAMIC_SINGLE_OP_RESET_SHAPE_PASS_H_
#include "graph/graph.h"
#include "inc/graph_pass.h"
#include "init/gelib.h"

namespace ge {
// Graph pass that resets the tensor shapes of dynamic single-op aicpu nodes
// to the unknown-rank shape so they can be built as dynamic kernels. The pass
// is skipped when the graph contains any non-aicpu compute node.
class DynamicSingleOpResetShapePass : public GraphPass {
public:
// Entry point: resets shapes of all eligible aicpu nodes in |graph|.
Status Run(ComputeGraphPtr graph) override;

private:
// Resets input and output shapes of |op_desc| to the unknown-rank shape.
Status ResetOpShape(OpDescPtr &op_desc);
// Sets each non-scalar, non-const input tensor shape to |dynamic_shape|.
Status ResetInputTensorShape(OpDescPtr &op_desc, const GeShape &dynamic_shape);
// Sets each non-scalar output tensor shape to |dynamic_shape|.
Status ResetOutputTensorShape(OpDescPtr &op_desc, const GeShape &dynamic_shape);
// Sets |is_not_aicpu| when any compute node uses a non-aicpu engine.
Status CheckAllAicpuNodes(const ComputeGraphPtr &graph, bool &is_not_aicpu);
// True when the tensor desc carries the const-input attribute.
bool CheckIfConstInput(const GeTensorDescPtr &input_tensor_desc);
};
} // namespace ge
#endif // GE_GRAPH_PASSES_DYNAMIC_SINGLE_OP_RESET_SHAPE_PASS_H_

+ 1
- 1
ge/hybrid/model/node_item.cc View File

@@ -236,8 +236,8 @@ void NodeItem::ResolveUnknownShapeType() {
Status NodeItem::Init() {
GE_CHK_STATUS_RET_NOLOG(InitInputsAndOutputs());
GE_CHK_STATUS_RET_NOLOG(ResolveDynamicState());
ResolveUnknownShapeType();
if (is_dynamic) {
ResolveUnknownShapeType();
GE_CHK_STATUS_RET_NOLOG(ResolveStaticInputsAndOutputs());
GE_CHK_STATUS_RET(ParseFusedSubgraph(*this), "[%s] Failed to parse fused subgraph", node_name.c_str());
}


+ 8
- 3
ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc View File

@@ -28,6 +28,7 @@ namespace hybrid {
namespace {
// mem need release
constexpr uint64_t kReleaseFlag = 1;
const char *const kAicpuAllshape = "_AllShape";
}
REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_TF, AiCpuNodeExecutor);
REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_CUSTOM, AiCpuNodeExecutor);
@@ -60,6 +61,7 @@ Status AicpuNodeTaskBase::InitExtInfo(const std::string &kernel_ext_info, int64_
GELOGD("To update aicpu_task ext_info session_info session_id to %lu", session_id);
GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateSessionInfoSessionId(session_id),
"UpdateSessionInfoSessionId failed.");
GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(!node_item_->is_dynamic), "UpdateExecuteMode failed.");

// copy task args buf
GE_CHK_STATUS_RET(AllocTensorBuffer(aicpu_ext_handle_.GetExtInfoLen(), ext_info_addr_dev_),
@@ -137,7 +139,6 @@ Status AicpuNodeTaskBase::UpdateExtInfo() {
return SUCCESS;
}

GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(false), "UpdateExecuteMode failed.");
for (auto i = 0; i < node_item_->num_inputs; ++i) {
auto input_desc = node_item_->MutableInputDesc(i);
GE_CHECK_NOTNULL(input_desc);
@@ -177,10 +178,14 @@ Status AicpuNodeTaskBase::UpdateArgs(TaskContext &context) {
}

GE_CHK_STATUS_RET(UpdateIoAddr(context), "Node[%s] update io addr failed.", node_name_.c_str());
if (node_item_->is_dynamic) {
// dynamic node need update ext info.
bool all_shape = false;
const OpDescPtr op_desc = node_item_->GetOpDesc();
(void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape);
if (node_item_->is_dynamic || all_shape) {
// dynamic node and all_shape kernel need update ext info.
GE_CHK_STATUS_RET(UpdateExtInfo(), "Node[%s] update ext info failed.", node_name_.c_str());
}

GELOGD("Node[%s] update args end.", node_name_.c_str());
return SUCCESS;
}


+ 0
- 1
tests/ut/ge/CMakeLists.txt View File

@@ -186,7 +186,6 @@ set(COMMON_SRC_FILES
"${GE_CODE_DIR}/ge/graph/passes/atomic_addr_clean_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/mark_same_addr_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/mark_graph_unknown_status_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/dynamic_single_op_reset_shape_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/mark_agnostic_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/dimension_compute_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/dimension_adjust_pass.cc"


+ 1
- 0
tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc View File

@@ -140,6 +140,7 @@ TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) {
TEST_F(UtestKernelExTaskInfo, kernel_ex_task_ext_info) {
const string ext_info = {1, 1, 1, 1, 0, 0, 0, 0};
const OpDescPtr op_desc = CreateOpDesc("FrameworkOp", "FrameworkOp");
AttrUtils::SetBool(op_desc, "_AllShape", true);

KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(ext_info, op_desc), SUCCESS);


+ 1
- 0
tests/ut/ge/graph/load/kernel_task_info_unittest.cc View File

@@ -390,6 +390,7 @@ TEST_F(UtestKernelTaskInfo, init_kernel_taskInfo_with_aicpu_kernel_type_fail) {
rtStreamCreate(&stream, 0);
model.stream_list_ = { stream };
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
AttrUtils::SetBool(model.op_list_[0], "_AllShape", true);

domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;


Loading…
Cancel
Save