!1536 modify code format

Merge pull request !1536 from ldy2021/master
Tag: tags/v1.3.0
Author: 计晨 (Gitee), 3 years ago
Commit: 5d92f4fc21
12 changed files with 166 additions and 94 deletions
  1. +2  -1   ge/ge_local_engine/engine/ge_local_engine.cc
  2. +20 -13  ge/ge_local_engine/engine/host_cpu_engine.cc
  3. +22 -10  ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc
  4. +2  -1   ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc
  5. +4  -2   ge/ge_local_engine/ops_kernel_store/op/op_factory.cc
  6. +89 -46  ge/generator/ge_generator.cc
  7. +22 -15  ge/generator/generator_api.cc
  8. +1  -1   ge/hybrid/executor/hybrid_model_async_executor.h
  9. +1  -1   ge/hybrid/hybrid_davinci_model.cc
  10. +1 -1   ge/hybrid/node_executor/aicore/aicore_op_task.cc
  11. +1 -1   ge/hybrid/node_executor/host_cpu/kernel/assign_kernel.cc
  12. +1 -2   ge/hybrid/node_executor/node_executor.cc

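All twelve files follow the same pattern: wherever a failure was previously only logged, the change adds a structured error report (REPORT_CALL_ERROR or REPORT_INNER_ERROR, always with the generic code "E19999" in this pull request) ahead of the GELOGE call, and the log message gains a bracketed [Stage][Action] prefix such as [Check][Param] or [Call][MakeShared]. A minimal before/after sketch, lifted from the first hunk below; the macros themselves are defined elsewhere in the GraphEngine code base:

    // Before: a single error log.
    GELOGE(FAILED, "Make GeLocalOpsKernelInfoStore failed.");

    // After: report to the error manager first, then log with a tagged message.
    REPORT_CALL_ERROR("E19999", "create GeLocalOpsKernelInfoStore failed.");
    GELOGE(FAILED, "[Call][MakeShared] Make GeLocalOpsKernelInfoStore failed.");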
ge/ge_local_engine/engine/ge_local_engine.cc (+2, -1)

@@ -35,7 +35,8 @@ Status GeLocalEngine::Initialize(const std::map<string, string> &options) {
if (ops_kernel_store_ == nullptr) {
ops_kernel_store_ = MakeShared<GeLocalOpsKernelInfoStore>();
if (ops_kernel_store_ == nullptr) {
GELOGE(FAILED, "Make GeLocalOpsKernelInfoStore failed.");
REPORT_CALL_ERROR("E19999", "create GeLocalOpsKernelInfoStore failed.");
GELOGE(FAILED, "[Call][MakeShared] Make GeLocalOpsKernelInfoStore failed.");
return FAILED;
}
}


ge/ge_local_engine/engine/host_cpu_engine.cc (+20, -13)

@@ -43,7 +43,7 @@ namespace {
} \
auto tensor = TensorAdapter::AsTensor(*ge_tensor); \
auto tensor_name = op_desc->GetOutputNameByIndex(i); \
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(), "Failed to get output name. node = %s, index = %zu", \
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(), "[Get][OutputName] failed. node = %s, index = %zu", \
op_desc->GetName().c_str(), i); \
named_outputs.emplace(tensor_name, tensor); \
break; \
@@ -61,7 +61,8 @@ Status GetDataNumber(const GeTensorDesc &out_desc, uint64_t &data_num) {
if (out_desc.GetShape().IsUnknownShape()) {
std::vector<std::pair<int64_t, int64_t>> range;
if (out_desc.GetShapeRange(range) != GRAPH_SUCCESS) {
GELOGE(INTERNAL_ERROR, "Get shape range failed.");
REPORT_CALL_ERROR("E19999", "GetShapeRange failed.");
GELOGE(INTERNAL_ERROR, "[Get][ShapeRange] failed.");
return INTERNAL_ERROR;
}
int64_t max_range_size = 1;
@@ -72,7 +73,8 @@ Status GetDataNumber(const GeTensorDesc &out_desc, uint64_t &data_num) {
num_size = max_range_size;
}
if (num_size < 0) {
GELOGE(INTERNAL_ERROR, "Get negative size, num_size=%ld.", num_size);
REPORT_INNER_ERROR("E19999", "Get negative size, num_size=%ld.", num_size);
GELOGE(INTERNAL_ERROR, "[Check][Param] Get negative size, num_size=%ld.", num_size);
return INTERNAL_ERROR;
}
data_num = static_cast<uint64_t>(num_size);
@@ -137,10 +139,10 @@ Status HostCpuEngine::PrepareInputs(const ge::ConstOpDescPtr &op_desc,
map<std::string, const Tensor> &named_inputs) {
auto num_inputs = op_desc->GetInputsSize();
if (num_inputs != inputs.size()) {
GELOGE(PARAM_INVALID,
"Mismatching input sizes. op_desc has %zu input(s), but given %zu",
num_inputs,
inputs.size());
REPORT_INNER_ERROR("E19999", "Mismatching input sizes. op_desc:%s(%s) has %zu input(s), but given %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), num_inputs, inputs.size());
GELOGE(PARAM_INVALID, "[Check][Param] Mismatching input sizes. op_desc:%s(%s) has %zu input(s), but given %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), num_inputs, inputs.size());
return PARAM_INVALID;
}

@@ -149,8 +151,8 @@ Status HostCpuEngine::PrepareInputs(const ge::ConstOpDescPtr &op_desc,
GE_CHECK_NOTNULL(ge_tensor);
auto tensor = TensorAdapter::AsTensor(*ge_tensor);
auto tensor_name = op_desc->GetInputNameByIndex(i);
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(),
"Failed to get input name. node = %s, index = %zu", op_desc->GetName().c_str(), i);
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(), "[Get][InputName] failed. node = %s, index = %zu",
op_desc->GetName().c_str(), i);
GELOGD("Successfully inserted input tensor. node = %s, index = %zu, input name = %s",
op_desc->GetName().c_str(), i, tensor_name.c_str());
named_inputs.emplace(tensor_name, tensor);
@@ -173,7 +175,7 @@ Status HostCpuEngine::PrepareOutputs(const ge::ConstOpDescPtr &op_desc,
uint64_t data_num = 0;
if (need_create_flag) {
if (GetDataNumber(out_desc, data_num) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "node:%s, get size for output %zu failed", op_desc->GetName().c_str(), i);
GELOGE(INTERNAL_ERROR, "[Get][Number] node:%s get size for output %zu failed", op_desc->GetName().c_str(), i);
return INTERNAL_ERROR;
}
}
@@ -234,12 +236,16 @@ Status HostCpuEngine::Run(NodePtr &node, const vector<ConstGeTensorPtr> &inputs,
for (size_t i = 0; i < op_desc->GetOutputsSize(); i++) {
auto tensor_name = op_desc->GetOutputNameByIndex(i);
if (tensor_name.empty()) {
GELOGE(INTERNAL_ERROR, "Failed to get output name. node = %s, index = %zu", op_desc->GetName().c_str(), i);
REPORT_INNER_ERROR("E19999", "GetOutputNameByIndex failed, node = %s, index = %zu",
op_desc->GetName().c_str(), i);
GELOGE(INTERNAL_ERROR, "[Get][OutputName] failed. node = %s, index = %zu", op_desc->GetName().c_str(), i);
return INTERNAL_ERROR;
}
auto iter = named_outputs.find(tensor_name);
if (iter == named_outputs.end()) {
GELOGE(INTERNAL_ERROR, "Failed to get output tensor. node = %s, index = %zu, tensor_name = %s",
REPORT_INNER_ERROR("E19999", "get output tensor failed, node = %s, index = %zu, tensor_name = %s",
op_desc->GetName().c_str(), i, tensor_name.c_str());
GELOGE(INTERNAL_ERROR, "[Get][OutputTensor] failed. node = %s, index = %zu, tensor_name = %s",
op_desc->GetName().c_str(), i, tensor_name.c_str());
return INTERNAL_ERROR;
}
@@ -328,7 +334,8 @@ Status HostCpuEngine::LoadLib(const std::string &lib_path) {
if (handle == nullptr) {
const char *error = mmDlerror();
error = (error == nullptr) ? "" : error;
GELOGE(INTERNAL_ERROR, "Failed to invoke dlopen. path = %s, error = %s", lib_path.c_str(), error);
REPORT_CALL_ERROR("E19999", "mmDlopen failed, path = %s, error = %s", lib_path.c_str(), error);
GELOGE(INTERNAL_ERROR, "[Invoke][DlOpen] failed. path = %s, error = %s", lib_path.c_str(), error);
return INTERNAL_ERROR;
}


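Both report macros appear in host_cpu_engine.cc, and the split seems to follow a simple rule (inferred from this diff, not from a documented convention): REPORT_INNER_ERROR for parameter and consistency checks, REPORT_CALL_ERROR when a call into another component or system library fails. Two of the added lines from the hunks above, side by side:

    // Parameter check in PrepareInputs uses REPORT_INNER_ERROR.
    REPORT_INNER_ERROR("E19999", "Mismatching input sizes. op_desc:%s(%s) has %zu input(s), but given %zu",
                       op_desc->GetName().c_str(), op_desc->GetType().c_str(), num_inputs, inputs.size());

    // Failed dlopen in LoadLib uses REPORT_CALL_ERROR.
    REPORT_CALL_ERROR("E19999", "mmDlopen failed, path = %s, error = %s", lib_path.c_str(), error);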

ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc (+22, -10)

@@ -52,7 +52,8 @@ Status GeLocalOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) {
GELOGD("[%s] CalcOpRunningParam In.", ge_node.GetName().c_str());
OpDescPtr op_desc = ge_node.GetOpDesc();
if (op_desc == nullptr) {
GELOGE(FAILED, "CalcOpRunningParam failed, as op desc is null");
REPORT_CALL_ERROR("E19999", "param ge_node has no opdesc, check invalid.");
GELOGE(FAILED, "[Get][OpDesc] CalcOpRunningParam failed, as op desc is null");
return FAILED;
}

@@ -97,15 +98,21 @@ Status GeLocalOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) {
}

if (graph_status != GRAPH_SUCCESS) {
GELOGE(FAILED, "Calc op[%s:%s] out[%zu] mem size failed, format=%s, data_type=%s, error=%u.", node_name.c_str(),
node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
REPORT_CALL_ERROR("E19999", "calc op[%s:%s] out[%zu] mem size failed, format=%s, data_type=%s, error=%u.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), graph_status);
GELOGE(FAILED, "[Calc][MemSize] for op[%s:%s] out[%zu] failed, format=%s, data_type=%s, error=%u.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), graph_status);
return FAILED;
}

if (output_mem_size < 0) {
GELOGE(FAILED,
"Calc op[%s:%s] out[%zu] mem size is negative(not support),"
REPORT_INNER_ERROR("E19999", "Calc op[%s:%s] out[%zu] mem size is negative(not support),"
" format=%s, data_type=%s, mem_size=%ld.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), output_mem_size);
GELOGE(FAILED, "[Calc][MemSize] op[%s:%s] out[%zu] mem size is negative(not support),"
" format=%s, data_type=%s, mem_size=%ld.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), output_mem_size);
@@ -133,17 +140,20 @@ Status GeLocalOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) {

Status GeLocalOpsKernelBuilder::CalcConstantStrMemSize(const OpDescPtr &op_desc, int64_t &mem_size) {
if (op_desc == nullptr) {
GELOGE(FAILED, "CalcConstantStrMemSize failed, as op desc is null");
REPORT_INNER_ERROR("E19999", "param op_desc is nullptr, check invalid");
GELOGE(FAILED, "[Check][Param] CalcConstantStrMemSize failed, as op desc is null");
return FAILED;
}
ConstGeTensorPtr value = MakeShared<const GeTensor>();
if (value == nullptr) {
GELOGE(FAILED, "make shared ConstGeTensor exception.");
REPORT_CALL_ERROR("E19999", "make shared ConstGeTensor exception.");
GELOGE(FAILED, "[Create][GeTensor] make shared ConstGeTensor exception.");
return FAILED;
}
// Constant op attr name is "value"
if (!AttrUtils::GetTensor(op_desc, kConstantOpAttrName, value)) {
GELOGE(FAILED, "Get Constant op attr value failed");
REPORT_CALL_ERROR("E19999", "get op:%s attr value failed", op_desc->GetName().c_str());
GELOGE(FAILED, "[Get][Value] of Constant op attr failed");
return FAILED;
}
mem_size = static_cast<int64_t>(value->GetData().size());
@@ -165,13 +175,15 @@ Status GeLocalOpsKernelBuilder::GenerateTask(const Node &node, RunContext &conte

auto op = OpFactory::Instance().CreateOp(node, context);
if (op == nullptr) {
GELOGE(FAILED, "CreateOp for node:%s(%s) failed.", name.c_str(), type.c_str());
REPORT_CALL_ERROR("E19999", "create op for node:%s(%s) failed.", name.c_str(), type.c_str());
GELOGE(FAILED, "[Create][Op] for node:%s(%s) failed.", name.c_str(), type.c_str());
return FAILED;
}

Status ret = op->Run();
if (ret != SUCCESS) {
GELOGE(ret, "Node:%s(%s) op run failed.", name.c_str(), type.c_str());
REPORT_CALL_ERROR("E19999", "Node:%s(%s) op run failed.", name.c_str(), type.c_str());
GELOGE(ret, "[Call][Run] for Node:%s(%s) op failed.", name.c_str(), type.c_str());
return ret;
}
GELOGD("Ge local generate task for node:%s(%s) end, tasks.size()=%zu.", name.c_str(), type.c_str(), tasks.size());


ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc (+2, -1)

@@ -24,7 +24,8 @@ namespace ge_local {
GeDeletedOp::GeDeletedOp(const Node &node, RunContext &run_context) : Op(node, run_context) {}

Status GeDeletedOp::Run() {
GELOGE(FAILED, "Node:%s type is %s, should be deleted by ge.", name_.c_str(), type_.c_str());
REPORT_INNER_ERROR("E19999", "Node:%s type is %s, should be deleted by ge.", name_.c_str(), type_.c_str());
GELOGE(FAILED, "[Delelte][Node] Node:%s type is %s, should be deleted by ge.", name_.c_str(), type_.c_str());
// Do nothing
return FAILED;
}


ge/ge_local_engine/ops_kernel_store/op/op_factory.cc (+4, -2)

@@ -31,8 +31,10 @@ std::shared_ptr<Op> OpFactory::CreateOp(const Node &node, RunContext &run_contex
if (iter != op_creator_map_.end()) {
return iter->second(node, run_context);
}

GELOGE(FAILED, "Not supported OP, type = %s, name = %s", node.GetType().c_str(), node.GetName().c_str());
REPORT_INNER_ERROR("E19999", "Not supported OP, type = %s, name = %s",
node.GetType().c_str(), node.GetName().c_str());
GELOGE(FAILED, "[Check][Param] Not supported OP, type = %s, name = %s",
node.GetType().c_str(), node.GetName().c_str());
return nullptr;
}



ge/generator/ge_generator.cc (+89, -46)

@@ -94,7 +94,7 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "engine type",
"it only support default/AIcoreEngine/VectorEngine"});
GELOGE(FAILED, "[Check][EngineType]value:%d not support, "
GELOGE(FAILED, "[Check][Param] value:%d not support, "
"only support default/AIcoreEngine/VectorEngine now", static_cast<int>(engine_type));
return FAILED;
}
@@ -107,7 +107,8 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
// set op engine name and opkernelLib. when engine support
std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
if ((instance_ptr == nullptr) || (!instance_ptr->InitFlag())) {
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "CheckEngineType failed.");
REPORT_INNER_ERROR("E19999", "get gelib failed, as get instance failed or initflag failed.");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][GELib] CheckEngineType failed, as get gelib failed.");
return FAILED;
}
OpsKernelManager &ops_kernel_manager = instance_ptr->OpsKernelManagerObj();
@@ -115,7 +116,7 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
if (op_infos.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "optype", "it can not find"});
GELOGE(FAILED, "CheckEngineType: Can not get op info by op type %s", op_desc->GetType().c_str());
GELOGE(FAILED, "[Get][OpInfo] by op type %s failed.", op_desc->GetType().c_str());
return FAILED;
}
string kernel_name;
@@ -128,7 +129,8 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
if (kernel_name.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "engine name" + FmtToStr(op_engine_name), "it can not find"});
GELOGE(FAILED, "CheckEngineType:Can not find ops kernel, engine name: %s.", op_engine_name.c_str());
GELOGE(FAILED, "[Check][Param] Can not find ops kernel, engine name:%s. op:%s(%s)",
op_engine_name.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}
auto &kernel_map = ops_kernel_manager.GetAllOpsKernelInfoStores();
@@ -144,15 +146,14 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
} else {
ErrorManager::GetInstance().ATCReportErrMessage(
"E13002", {"optype", "opskernel", "reason"}, {op_desc->GetType(), kernel_name, unsupported_reason});
GELOGE(FAILED, "CheckEngineType: check support failed, Op type %s of ops kernel %s is unsupported, reason:%s",
GELOGE(FAILED, "[Call][CheckSupported] failed, Op type %s of ops kernel %s is unsupported, reason:%s",
op_desc->GetType().c_str(), kernel_name.c_str(), unsupported_reason.c_str());
return FAILED;
}
} else {
ErrorManager::GetInstance().ATCReportErrMessage(
"E13003", {"opname", "optype"}, {op_desc->GetName(), op_desc->GetType()});
GELOGE(FAILED,
"CheckEngineType:Can not find any supported ops kernel info store by kernel_name %s,"
GELOGE(FAILED, "[Check][Param] Can not find any supported ops kernel info store by kernel_name %s,"
"op type is %s, op name is %s",
kernel_name.c_str(), op_desc->GetType().c_str(), op_desc->GetName().c_str());
}
@@ -183,34 +184,47 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
string op_name = node->GetName() + "_in_" + std::to_string(index);
OpDescPtr data_op = MakeShared<ge::OpDesc>(op_name, op_type);
if (data_op == nullptr) {
REPORT_CALL_ERROR("E19999", "create OpDesc failed, name:%s", op_name.c_str());
GELOGE(FAILED, "[Create][OpDesc] failed, name:%s", op_name.c_str());
return FAILED;
}
if (is_const) {
ConstGeTensorPtr tensor_value;
if (!AttrUtils::GetTensor(tensor, ge::ATTR_NAME_WEIGHTS, tensor_value)) {
GELOGE(FAILED, "Get value failed, node name:%s.", tensor.GetName().c_str());
REPORT_CALL_ERROR("E19999", "get attr %s failed, tensor:%s.",
ge::ATTR_NAME_WEIGHTS.c_str(), tensor.GetName().c_str());
GELOGE(FAILED, "[Get][Attr] %s failed, tensor:%s.", ge::ATTR_NAME_WEIGHTS.c_str(), tensor.GetName().c_str());
return FAILED;
}
if (!AttrUtils::SetTensor(data_op, ge::ATTR_NAME_WEIGHTS, tensor_value)) {
GELOGE(FAILED, "Set attr ATTR_NAME_WEIGHTS fail.");
REPORT_CALL_ERROR("E19999", "set attr %s failed, op:%s.", ge::ATTR_NAME_WEIGHTS.c_str(), op_name.c_str());
GELOGE(FAILED, "[Set][Attr] %s failed, op:%s.", ge::ATTR_NAME_WEIGHTS.c_str(), op_name.c_str());
return FAILED;
}
}

(void)AttrUtils::SetBool(data_op, "_is_single_op", true);

GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][InputDesc]fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][OutputDesc]fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddInputDesc failed for node:%s", data_op->GetName().c_str());
return FAILED, "[Add][InputDesc] fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddOutputDesc failed for node:%s", data_op->GetName().c_str());
return FAILED, "[Add][OutputDesc] fail for node:%s", data_op->GetName().c_str());
if (attr && !is_const) {
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, data_index), return FAILED,
"[Set][Attr:%s]fail for node:%s", ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, data_index),
REPORT_CALL_ERROR("E19999", "set attr %s failed for node:%s",
ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
return FAILED,
"[Set][Attr:%s] fail for node:%s", ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
++data_index;
}

ge::NodePtr arg_node = graph->AddNode(data_op);
GE_CHK_BOOL_EXEC(arg_node != nullptr, return FAILED, "Insert Data node fail");
GE_CHK_BOOL_EXEC(arg_node != nullptr,
REPORT_CALL_ERROR("E19999", "add node:%s to graph:%s failed", data_op->GetName().c_str(),
graph->GetName().c_str());
return FAILED, "[Add][Node] Insert Data node:%s fail", data_op->GetName().c_str());

GE_CHK_STATUS(GraphUtils::AddEdge(arg_node->GetOutDataAnchor(0), node->GetInDataAnchor(index)),
"[Add][Edge]fail from node:%s to node:%s", data_op->GetName().c_str(), node->GetName().c_str());
@@ -221,6 +235,8 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
static Status AddOutputs(const ComputeGraphPtr &graph, const NodePtr &node, const vector<GeTensor> &outputs) {
OpDescPtr op_desc = MakeShared<ge::OpDesc>(graph->GetName() + "_" + NODE_NAME_NET_OUTPUT, NETOUTPUT);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "create OpDesc failed, graph:%s", graph->GetName().c_str());
GELOGE(FAILED, "[Create][OpDesc] failed, graph:%s", graph->GetName().c_str());
return FAILED;
}
(void)AttrUtils::SetBool(op_desc, "_is_single_op", true);
@@ -228,18 +244,23 @@ static Status AddOutputs(const ComputeGraphPtr &graph, const NodePtr &node, cons
for (const auto &out_desc : outputs) {
GeTensorDesc tensor = out_desc.GetTensorDesc();
TensorUtils::SetInputTensor(tensor, true);
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][InputDesc]fail for node:%s", op_desc->GetName().c_str());
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddInputDesc failed for node:%s", op_desc->GetName().c_str());
return FAILED, "[Add][InputDesc]fail for node:%s", op_desc->GetName().c_str());

TensorUtils::SetInputTensor(tensor, false);
TensorUtils::SetOutputTensor(tensor, true);
GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][OutputDesc]fail for node:%s", op_desc->GetName().c_str());
GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddOutputDesc failed for node:%s", op_desc->GetName().c_str());
return FAILED, "[Add][OutputDesc]fail for node:%s", op_desc->GetName().c_str());
count++;
}
GE_CHECK_NOTNULL_EXEC(graph, return PARAM_INVALID);
ge::NodePtr out_node = graph->AddNode(op_desc);
GE_CHK_BOOL_EXEC(out_node != nullptr, return FAILED,
GE_CHK_BOOL_EXEC(out_node != nullptr,
REPORT_CALL_ERROR("E19999", "add node:%s to graph:%u failed.",
op_desc->GetName().c_str(), graph->GetGraphID());
return FAILED,
"[Add][Node:%s]fail in graph:%u", op_desc->GetName().c_str(), graph->GetGraphID());
GE_CHECK_NOTNULL_EXEC(node, return PARAM_INVALID);
for (int32_t i = 0; i < count; ++i) {
@@ -256,7 +277,8 @@ static void GetOpsProtoPath(string &opsproto_path) {
string path = path_env;
string file_path = RealPath(path.c_str());
if (file_path.empty()) {
GELOGE(FAILED, "File path %s is invalid.", path.c_str());
REPORT_CALL_ERROR("E19999", "File path %s is invalid.", path.c_str());
GELOGE(FAILED, "[Call][RealPath] File path %s is invalid.", path.c_str());
return;
}
opsproto_path = (path + "/op_proto/custom/" + ":") + (path + "/op_proto/built-in/");
@@ -288,7 +310,8 @@ static Status ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTenso
int64_t storage_format = FORMAT_NCHW;
if (ge::AttrUtils::GetInt(desc, ge::ATTR_NAME_STORAGE_FORMAT, storage_format) &&
!ge::AttrUtils::SetListInt(desc, ge::ATTR_NAME_STORAGE_SHAPE, dynamic_shape_dims)) {
GELOGE(FAILED, "Set attr ATTR_NAME_STORAGE_SHAPE fail.");
REPORT_CALL_ERROR("E19999", "Set attr ATTR_NAME_STORAGE_SHAPE failed to op:%s.", desc.GetName().c_str());
GELOGE(FAILED, "[Set][Attr] ATTR_NAME_STORAGE_SHAPE fail.");
return FAILED;
}
desc.SetShape(dynamic_shape);
@@ -373,7 +396,8 @@ Status GeGenerator::Initialize(const map<string, string> &options) {
Status GeGenerator::Initialize(const map<string, string> &options, OmgContext &omg_context) {
impl_ = ge::MakeShared<Impl>(omg_context);
if (impl_ == nullptr) {
GELOGE(MEMALLOC_FAILED, "Make shared failed");
REPORT_CALL_ERROR("E19999", "create Impl failed.");
GELOGE(MEMALLOC_FAILED, "[Create][Impl] Make shared failed");
return MEMALLOC_FAILED;
}

@@ -388,7 +412,7 @@ Status GeGenerator::Initialize(const map<string, string> &options, OmgContext &o

Status ret = impl_->graph_manager_.Initialize(options);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED, "Graph manager initialize failed.");
GELOGE(GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED, "[Call][Initialize] Graph manager initialize failed.");
return GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED;
}
// get ek file
@@ -430,7 +454,7 @@ Status GeGenerator::Finalize() {
GE_CHECK_NOTNULL_EXEC(impl_, return PARAM_INVALID);
Status ret = impl_->graph_manager_.Finalize();
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED, "Graph manager finalize failed.");
GELOGE(GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED, "[Call][Finalize] Graph manager finalize failed.");
return GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED;
}
return SUCCESS;
@@ -454,9 +478,9 @@ Status GeGenerator::GenerateInfershapeGraph(const Graph &graph) {

Status ret = impl_->GenerateInfershapeGraph(graph);
if (ret != SUCCESS) {
GELOGE(ret, "Dump infershape json failed");
GELOGE(ret, "[Call][GenerateInfershapeGraph] Dump infershape json failed");
if (impl_->graph_manager_.Finalize() != SUCCESS) {
GELOGE(FAILED, "graph_manager finalize fail.");
GELOGE(FAILED, "[Call][Finalize] graph_manager finalize fail.");
}
return ret;
}
@@ -653,9 +677,9 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr
impl_->is_offline_ = is_offline;
Status ret = impl_->BuildModel(graph, inputs, ge_root_model);
if (ret != SUCCESS) {
GELOGE(ret, "Build model failed.");
GELOGE(ret, "[Build][Model] failed, ret:%d.", ret);
if (impl_->graph_manager_.Finalize() != SUCCESS) {
GELOGE(FAILED, "graph_manager finalize fail.");
GELOGE(FAILED, "[Call][Finalize] graph_manager finalize fail.");
}
return ret;
}
@@ -679,7 +703,7 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr
}
ret = impl_->SaveRootModel(file_name_prefix, ge_root_model, model);
if (ret != SUCCESS) {
GELOGE(ret, "Save model failed");
GELOGE(ret, "[Save][RootModel] failed, ret:%d, file:%s", ret, file_name_prefix.c_str());
if (impl_->graph_manager_.Finalize() != SUCCESS) {
GELOGE(FAILED, "graph_manager finalize fail.");
}
@@ -764,14 +788,16 @@ Status GeGenerator::CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor>
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "inputs size" + FmtToStr(op_desc->GetAllInputsSize()),
"tensor size is " + FmtToStr(inputs.size())});
GELOGE(PARAM_INVALID, "Tensor size: %zu, Inputs size: %zu", inputs.size(), op_desc->GetAllInputsSize());
GELOGE(PARAM_INVALID, "[Check][Param] Tensor size: %zu, op:%s(%s) Inputs size: %zu, not equal",
inputs.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_desc->GetAllInputsSize());
return PARAM_INVALID;
}
if (!outputs.empty() && (outputs.size() != op_desc->GetOutputsSize())) {
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "outputs size" + FmtToStr(op_desc->GetOutputsSize()),
"tensor size is " + FmtToStr(outputs.size())});
GELOGE(PARAM_INVALID, "Tensor size: %zu, Outputs size: %zu", outputs.size(), op_desc->GetOutputsSize());
GELOGE(PARAM_INVALID, "[Check][Param] Tensor size: %zu, op:%s(%s) Outputs size: %zu, not equal",
outputs.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_desc->GetOutputsSize());
return PARAM_INVALID;
}
return SUCCESS;
@@ -786,7 +812,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
(void)AttrUtils::SetBool(op_desc, ATTR_SINGLE_OP_SCENE, true);

if (CheckForSingleOp(op_desc, inputs, outputs) != SUCCESS) {
GELOGE(PARAM_INVALID, "input param is invalid when build single op!");
GELOGE(PARAM_INVALID, "[Check][Param] input param is invalid when build single op:%s!",
op_desc->GetName().c_str());
return PARAM_INVALID;
}
OmgContext &omg_context = (impl_ == nullptr) ? domi::GetContext() : impl_->omg_context_;
@@ -805,6 +832,7 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
fuzz_compile_flag = true;
}
if (!AttrUtils::SetBool(op_desc, ATTR_NAME_FUZZ_BUILD, fuzz_compile_flag)) {
REPORT_CALL_ERROR("E19999", "set ATTR_NAME_FUZZ_BUILD failed for %s.", op_desc->GetName().c_str());
GELOGE(FAILED, "[Set][ATTR_NAME_FUZZ_BUILD] Failed to set attr for %s.", op_desc->GetName().c_str());
return FAILED;
}
@@ -813,7 +841,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
// 1. Create ComputeGraph.
string name = ge::CurrentTimeInStr() + "_" + model_file_name;
Graph graph;
GE_CHK_STATUS(BuildSingleOpGraph(op_desc, inputs, outputs, name, graph), "make graph fail.");
GE_CHK_STATUS(BuildSingleOpGraph(op_desc, inputs, outputs, name, graph),
"[Build][Graph] for single op:%s fail.", op_desc->GetName().c_str());

// 2. check engine type when compile online
if (model_file_name == kFileNameSuffix) {
@@ -838,7 +867,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
GE_CHECK_NOTNULL(ge_root_model->GetRootGraph());
map<string, GeModelPtr> name_to_ge_model = ge_root_model->GetSubgraphInstanceNameToModel();
if (name_to_ge_model.empty()) {
GELOGE(PARAM_INVALID, "GetSubgraphInstanceNameToModel is empty.");
REPORT_CALL_ERROR("E19999", "GetSubgraphInstanceNameToModel failed.");
GELOGE(PARAM_INVALID, "[Get][Name] GetSubgraphInstanceNameToModel is empty.");
return PARAM_INVALID;
}
const ComputeGraphPtr root_graph = ge_root_model->GetRootGraph();
@@ -869,7 +899,11 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
}
if (!fuzz_build_attrs.empty()) {
GE_CHK_BOOL_EXEC(AttrUtils::SetListNamedAttrs(ge_model, ATTR_NAME_FUZZ_BUILD_RES_ATTRS, fuzz_build_attrs),
return FAILED, "Set ATTR_NAME_FUZZ_BUILD_RES_ATTRS failed.");
REPORT_CALL_ERROR("E19999", "Set model:%s(id:%u) attr:%s failed.",
ge_model->GetName().c_str(), ge_model->GetModelId(),
ATTR_NAME_FUZZ_BUILD_RES_ATTRS.c_str());
return FAILED, "Set model:%s(id:%u) attr:%s failed.",
ge_model->GetName().c_str(), ge_model->GetModelId(), ATTR_NAME_FUZZ_BUILD_RES_ATTRS.c_str());
}
GE_CHK_STATUS_RET_NOLOG(impl_->SaveParams(ge_model, op_desc_tmp->GetType(), op_attrs, inputs, outputs));
} else {
@@ -998,7 +1032,7 @@ Status GeGenerator::Impl::SaveModel(const string &file_name_prefix, GeModelPtr &
model_helper.SetSaveMode(is_offline_);
Status ret = model_helper.SaveToOmModel(model, save_param_, file_name_prefix, model_buff);
if (ret != SUCCESS) {
GELOGE(ret, "Save to om model failed");
GELOGE(ret, "[Call][SaveToOmModel] Save to om model failed");
return ret;
}
return SUCCESS;
@@ -1009,12 +1043,15 @@ Status GeGenerator::Impl::SaveRootModel(const string &file_name_prefix, GeRootMo
bool is_unknown_shape = false;
auto ret = ge_root_model->CheckIsUnknownShape(is_unknown_shape);
if (ret != SUCCESS) {
GELOGE(FAILED, "Check root model is unkonwn shape failed");
REPORT_CALL_ERROR("E19999", "root model(id:%u) CheckIsUnknownShape failed, ret:%d",
ge_root_model->GetModelId(), ret);
GELOGE(FAILED, "[Check][RootModel] is unkonwn shape failed, ret:%d", ret);
return FAILED;
}
GELOGD("begin save root model, cur model is unkonwn shape model ? : %d", is_unknown_shape);
GE_CHK_BOOL_EXEC(!ge_root_model->GetSubgraphInstanceNameToModel().empty(), return FAILED,
"ge root model has no sub model")
GE_CHK_BOOL_EXEC(!ge_root_model->GetSubgraphInstanceNameToModel().empty(),
REPORT_CALL_ERROR("E19999", "root model(id:%u) has no sub model.", ge_root_model->GetModelId());
return FAILED, "[Get][SubModel] ge root model has no sub model")
GeModelPtr model_root = nullptr;
if (is_unknown_shape) {
auto name_to_ge_model = ge_root_model->GetSubgraphInstanceNameToModel();
@@ -1038,7 +1075,8 @@ Status GeGenerator::Impl::SaveRootModel(const string &file_name_prefix, GeRootMo
model_helper.SetSaveMode(is_offline_);
ret = model_helper.SaveToOmRootModel(ge_root_model, save_param_, file_name_prefix, model_buff, is_unknown_shape);
if (ret != SUCCESS) {
GELOGE(ret, "Save to om model failed");
REPORT_CALL_ERROR("E19999", "SaveToOmRootModel failed, ret:%d, model id:%u", ret, ge_root_model->GetModelId());
GELOGE(ret, "[Call][SaveToOmRootModel] failed, ret:%d, model id:%u", ret, ge_root_model->GetModelId());
return ret;
}
return SUCCESS;
@@ -1051,7 +1089,8 @@ Status GeGenerator::Impl::BuildModel(const Graph &graph, const vector<GeTensor>
const std::map<std::string, std::string> options;
Status ret = graph_manager_.AddGraph(graph_id, graph, options, omg_context_);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "GraphManager add graph fail, graph id: %u", graph_id);
REPORT_CALL_ERROR("E19999", "add graph(id:%u) failed, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "[Add][Graph] fail, graph id: %u", graph_id);
(void)graph_manager_.Finalize();
return GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED;
}
@@ -1075,7 +1114,8 @@ Status GeGenerator::Impl::BuildModel(const Graph &graph, const vector<GeTensor>

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "GraphManager build graph fail, graph id: %u", graph_id);
REPORT_CALL_ERROR("E19999", "build graph failed, graph id:%u, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "[Build][Graph] fail, graph id: %u", graph_id);
ret = GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED;
}

@@ -1091,14 +1131,17 @@ Status GeGenerator::Impl::GenerateInfershapeGraph(const Graph &graph) {
const std::map<std::string, std::string> options;
Status ret = graph_manager_.AddGraph(graph_id, graph, options, omg_context_);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "GraphManager add graph failed, graph id: %u", graph_id);
REPORT_CALL_ERROR("E19999", "add graph failed, graph id:%u, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "[Add][Graph] failed, graph id: %u", graph_id);
(void)graph_manager_.Finalize();
return GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED;
}

ret = graph_manager_.GenerateInfershapeGraph(graph_id);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "GraphManager generate graph failed");
REPORT_CALL_ERROR("E19999", "GenerateInfershapeGraph failed, graph id:%u, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED,
"[Generate][Graph] failed, graph id:%u, ret:%d", graph_id, ret);
return GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED;
}

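Several hunks in ge_generator.cc slot the new REPORT_CALL_ERROR into the second argument of GE_CHK_BOOL_EXEC, ahead of the existing return FAILED. That works because the macro's second argument is an arbitrary statement sequence executed when the check fails; the commas inside the REPORT_CALL_ERROR call sit within their own parentheses and therefore do not split macro arguments. A hypothetical minimal definition that would accept the call sites above (the real macro is defined elsewhere in the repository and may differ):

    // Sketch only, not the actual GraphEngine definition.
    #define GE_CHK_BOOL_EXEC(expr, exec_expr, ...) \
      {                                            \
        if (!(expr)) {                             \
          GELOGE(ge::FAILED, __VA_ARGS__);         \
          exec_expr;                               \
        }                                          \
      }

A plain braced block rather than do { ... } while (0) is assumed here because some call sites in this file, such as the "ge root model has no sub model" check in SaveRootModel, omit the trailing semicolon.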


ge/generator/generator_api.cc (+22, -15)

@@ -23,22 +23,24 @@
#include "graph/op_desc.h"
#include "graph/utils/tensor_utils.h"

#define CHECK_PARAM_NOT_NULL(param) \
do { \
if (param == nullptr) { \
GELOGE(ge::PARAM_INVALID, "Param: %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
#define CHECK_PARAM_NOT_NULL(param) \
do { \
if (param == nullptr) { \
REPORT_INNER_ERROR("E19999", "param:%s is null", #param); \
GELOGE(ge::PARAM_INVALID, "[Check][Param] %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
} while (0)

#define CHECK_PARAM_OBJECT(object, param) \
({ \
object *obj_value = reinterpret_cast<object *>(param); \
if (obj_value == nullptr) { \
GELOGE(ge::PARAM_INVALID, "Param: %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
obj_value; \
#define CHECK_PARAM_OBJECT(object, param) \
({ \
object *obj_value = reinterpret_cast<object *>(param); \
if (obj_value == nullptr) { \
REPORT_INNER_ERROR("E19999", "param:%s is null.", #param); \
GELOGE(ge::PARAM_INVALID, "[Check][Param] %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
obj_value; \
})

class OpAttr {
@@ -118,6 +120,8 @@ Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int
std::string op_name = std::string(op_type) + "_" + std::to_string(ge::GetCurrentTimestamp());
ge::OpDescPtr op_desc = ge::MakeShared<ge::OpDesc>(op_name, op_type);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "MakeShared ge::OpDesc failed, as return nullptr");
GELOGE(ge::FAILED, "[Call][MakeShared] create ge::OpDesc failed.");
return ge::FAILED;
}
std::vector<ge::GeTensor> inputs;
@@ -132,7 +136,8 @@ Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int
ge::TensorUtils::SetOutputTensor(tensor_desc, false);

if (op_desc->AddInputDesc(tensor_desc) != ge::GRAPH_SUCCESS) {
GELOGE(ge::FAILED, "AddInputDesc fail.");
REPORT_CALL_ERROR("E19999", "add inputdesc failed, op:%s", op_desc->GetName().c_str());
GELOGE(ge::FAILED, "[Add][InputDesc] fail, op:%s.", op_desc->GetName().c_str());
return ge::FAILED;
}
inputs.emplace_back(tensor_desc);
@@ -157,6 +162,8 @@ Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int
OpAttr *op_attr = CHECK_PARAM_OBJECT(OpAttr, attr);
for (const auto &it : op_attr->Attrs()) {
GE_IF_BOOL_EXEC(op_desc->SetAttr(it.first, it.second) != ge::SUCCESS, GELOGE(ge::FAILED, "SetAttr failed.");
REPORT_CALL_ERROR("E19999", "set attr:%s failed, op:%s",
it.first.c_str(), op_desc->GetName().c_str());
return ge::FAILED);
}
}

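The reformatted CHECK_PARAM_NOT_NULL keeps its do { ... } while (0) wrapper, and CHECK_PARAM_OBJECT stays a GCC/Clang statement expression, ({ ... }), so that it evaluates to the casted pointer. A generic, self-contained illustration of why the do/while(0) form matters; the names below are made up and are not GE code:

    // A multi-statement check wrapped in do { ... } while (0) expands to exactly one
    // statement, so "CHECK_NOT_NULL(p);" composes safely with an unbraced if/else.
    #define CHECK_NOT_NULL(p)   \
      do {                      \
        if ((p) == nullptr) {   \
          return -1;            \
        }                       \
      } while (0)

    int ReadOrZero(const int *p, bool strict) {
      if (strict)
        CHECK_NOT_NULL(p);  // with a bare { ... } block, this ';' would detach the else below
      else
        return 0;
      return *p;
    }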

ge/hybrid/executor/hybrid_model_async_executor.h (+1, -1)

@@ -61,7 +61,7 @@ class HybridModelAsyncExecutor {

void SetRunningFlag(bool flag) { running_flag_ = flag; }

const GraphExecutionContext * GeContext() { return executor_->GetContext(); }
const GraphExecutionContext *GeContext() { return executor_->GetContext(); }

private:
Status InitInputDesc();


ge/hybrid/hybrid_davinci_model.cc (+1, -1)

@@ -86,7 +86,7 @@ class HybridDavinciModel::Impl {
return model_.GetDeviceId();
}

const GraphExecutionContext * GeContext() { return executor_.GeContext(); }
const GraphExecutionContext *GeContext() { return executor_.GeContext(); }

uint64_t GetSessionId() {
return model_.GetSessionId();


ge/hybrid/node_executor/aicore/aicore_op_task.cc (+1, -1)

@@ -309,7 +309,7 @@ Status AiCoreOpTask::InitWithTaskDef(const OpDesc &op_desc, const domi::TaskDef
auto rt_ret = ValidateTaskDef(task_def);
if (rt_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "op:%s(op_type:%s) failed to validate task def:%s",
op_desc.GetName().c_str(), op_desc.GetType().c_str(), task_def.DebugString().c_str());
op_desc.GetName().c_str(), op_desc.GetType().c_str(), task_def.DebugString().c_str());
GELOGE(rt_ret, "[Invoke][ValidateTaskDef]failed for op:%s(op_type:%s) to validate task def:%s",
op_desc.GetName().c_str(), op_desc.GetType().c_str(), task_def.DebugString().c_str());
return rt_ret;


ge/hybrid/node_executor/host_cpu/kernel/assign_kernel.cc (+1, -1)

@@ -35,7 +35,7 @@ Status AssignKernel::Compute(TaskContext& context) {
GE_CHECK_NOTNULL(value_tensor);
if (value_tensor->GetSize() > ref_tensor->GetSize()) {
REPORT_INNER_ERROR("E19999", "[%s] value_input_size=%zu bigger than ref_input_size=%zu. check invalid",
node_->GetName().c_str(), value_tensor->GetSize(), ref_tensor->GetSize());
node_->GetName().c_str(), value_tensor->GetSize(), ref_tensor->GetSize());
GELOGE(INTERNAL_ERROR, "[Check][Size][%s] value_input_size=%zu, but ref_input_size=%zu.",
node_->GetName().c_str(), value_tensor->GetSize(), ref_tensor->GetSize());
return INTERNAL_ERROR;


ge/hybrid/node_executor/node_executor.cc (+1, -2)

@@ -119,8 +119,7 @@ Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executo
auto executor_type = ResolveExecutorType(node);
const auto it = executors_.find(executor_type);
if (it == executors_.end()) {
REPORT_INNER_ERROR("E19999", "Failed to get executor by type: %d.",
static_cast<int>(executor_type));
REPORT_INNER_ERROR("E19999", "Failed to get executor by type: %d.", static_cast<int>(executor_type));
GELOGE(INTERNAL_ERROR, "[Check][ExecutorType]Failed to get executor by type: %d.",
static_cast<int>(executor_type));
return INTERNAL_ERROR;

