diff --git a/ge/common/dump/dump_op.cc b/ge/common/dump/dump_op.cc
index 0becbdc8..4456383c 100755
--- a/ge/common/dump/dump_op.cc
+++ b/ge/common/dump/dump_op.cc
@@ -20,6 +20,7 @@
 #include "common/ge/datatype_util.h"
 #include "framework/common/debug/ge_log.h"
 #include "framework/common/util.h"
+#include "framework/common/types.h"
 #include "graph/anchor.h"
 #include "graph/ge_tensor.h"
 #include "graph/op_desc.h"
@@ -55,8 +56,10 @@ void DumpOp::SetLoopAddr(void *global_step, void *loop_per_iter, void *loop_cond
   loop_cond_ = reinterpret_cast<uintptr_t>(loop_cond);
 }
 
-void DumpOp::SetDynamicModelInfo(const string &dynamic_model_name, uint32_t dynamic_model_id) {
+void DumpOp::SetDynamicModelInfo(const string &dynamic_model_name, const string &dynamic_om_name,
+                                 uint32_t dynamic_model_id) {
   dynamic_model_name_ = dynamic_model_name;
+  dynamic_om_name_ = dynamic_om_name;
   dynamic_model_id_ = dynamic_model_id;
 }
 
@@ -200,6 +203,28 @@ Status DumpOp::ExecutorDumpOp(aicpu::dump::OpMappingInfo &op_mapping_info) {
   return SUCCESS;
 }
 
+Status DumpOp::SetDumpModelName(aicpu::dump::OpMappingInfo &op_mapping_info) {
+  std::set<std::string> model_list = dump_properties_.GetAllDumpModel();
+  bool not_find_by_omname = model_list.find(dynamic_om_name_) == model_list.end();
+  bool not_find_by_modelname = model_list.find(dynamic_model_name_) == model_list.end();
+  std::string dump_model_name = not_find_by_omname ? dynamic_model_name_ : dynamic_om_name_;
+  if (model_list.find(DUMP_ALL_MODEL) == model_list.end()) {
+    if (not_find_by_omname && not_find_by_modelname) {
+      std::string model_list_str;
+      for (auto &model : model_list) {
+        model_list_str += "[" + model + "].";
+      }
+      GELOGW("Model %s will not be set to dump, dump list: %s", dump_model_name.c_str(), model_list_str.c_str());
+      return FAILED;
+    }
+  }
+  if (!dump_model_name.empty() && dump_properties_.IsDumpOpen()) {
+    GELOGD("Dump model name is %s", dump_model_name.c_str());
+    op_mapping_info.set_model_name(dump_model_name);
+  }
+  return SUCCESS;
+}
+
 Status DumpOp::LaunchDumpOp() {
   GELOGI("Start to launch dump op %s", op_desc_->GetName().c_str());
   int32_t device_id = 0;
@@ -209,8 +234,7 @@ Status DumpOp::LaunchDumpOp() {
     return RT_ERROR_TO_GE_STATUS(rt_ret);
   }
   if (device_id < 0) {
-    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR,
-           "Check device_id failed, device_id = %d, which should be not less than 0.",
+    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "Check device_id failed, device_id = %d, which should be not less than 0.",
            device_id);
     return ACL_ERROR_GE_INTERNAL_ERROR;
   }
@@ -220,11 +244,12 @@ Status DumpOp::LaunchDumpOp() {
   op_mapping_info.set_flag(kAicpuLoadFlag);
   op_mapping_info.set_dump_step(dump_properties_.GetDumpStep());
   op_mapping_info.set_model_id(dynamic_model_id_);
-  if (!dynamic_model_name_.empty() && dump_properties_.IsDumpOpen()) {
-    op_mapping_info.set_model_name(dynamic_model_name_);
+
+  if (SetDumpModelName(op_mapping_info) != SUCCESS) {
+    return SUCCESS;
   }
   SetOpMappingLoopAddr(global_step_, loop_per_iter_, loop_cond_, op_mapping_info);
-  GELOGI("Dump step is %s ,dump path is %s ,in Launch dump op", dump_properties_.GetDumpStep().c_str(),
+  GELOGI("Dump step is %s ,dump path is %s in Launch dump op", dump_properties_.GetDumpStep().c_str(),
          dump_path.c_str());
   uint32_t task_id = 0;
   uint32_t stream_id = 0;
@@ -273,4 +298,4 @@ Status DumpOp::LaunchDumpOp() {
   }
   return SUCCESS;
 }
-}  // namesapce ge
+}  // namespace ge
diff --git a/ge/common/dump/dump_op.h b/ge/common/dump/dump_op.h
index d59962e6..4d322bee 100755
--- a/ge/common/dump/dump_op.h
+++ b/ge/common/dump/dump_op.h
@@ -34,12 +34,13 @@ class DumpOp {
                     vector output_addrs, rtStream_t stream);
   Status LaunchDumpOp();
   void SetLoopAddr(void *global_step, void *loop_per_iter, void *loop_cond);
-  void SetDynamicModelInfo(const string &dynamic_model_name, uint32_t dynamic_model_id);
+  void SetDynamicModelInfo(const string &dynamic_model_name, const string &dynamic_om_name, uint32_t dynamic_model_id);
 
  private:
   Status ExecutorDumpOp(aicpu::dump::OpMappingInfo &op_mapping_info);
   Status DumpOutput(aicpu::dump::Task &task);
   Status DumpInput(aicpu::dump::Task &task);
+  Status SetDumpModelName(aicpu::dump::OpMappingInfo &op_mapping_info);
 
   DumpProperties dump_properties_;
   OpDescPtr op_desc_;
@@ -54,6 +55,7 @@ class DumpOp {
   uintptr_t loop_cond_;
 
   std::string dynamic_model_name_;
+  std::string dynamic_om_name_;
   std::uint32_t dynamic_model_id_;
 };
 }  // namespace ge
diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc
index 3fbfd16b..65b1e89a 100644
--- a/ge/common/dump/dump_properties.cc
+++ b/ge/common/dump/dump_properties.cc
@@ -35,14 +35,14 @@ const std::string kDumpStatusOpen = "on";
 const uint32_t kAicoreOverflow = (0x1 << 0);
 const uint32_t kAtomicOverflow = (0x1 << 1);
 const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow);
-}
+}  // namespace
 namespace ge {
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties::DumpProperties(const DumpProperties &other) {
   CopyFrom(other);
 }
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties::operator=(
-  const DumpProperties &other) {
+    const DumpProperties &other) {
   CopyFrom(other);
   return *this;
 }
@@ -97,7 +97,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti
 
 // The following is the new dump scenario of the fusion operator
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::AddPropertyValue(
-  const std::string &model, const std::set<std::string> &layers) {
+    const std::string &model, const std::set<std::string> &layers) {
   for (const std::string &layer : layers) {
     GELOGI("This model %s config to dump layer %s", model.c_str(), layer.c_str());
   }
@@ -138,7 +138,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set DumpPrope
 }
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpProperties::GetPropertyValue(
-  const std::string &model) const {
+    const std::string &model) const {
   auto iter = model_dump_properties_map_.find(model);
   if (iter != model_dump_properties_map_.end()) {
     return iter->second;
@@ -147,8 +147,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set DumpPrope
 }
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsLayerNeedDump(
-  const std::string &model, const std::string &om_name, const std::string &op_name) const {
+    const std::string &model, const std::string &om_name, const std::string &op_name) const {
   // if dump all
+  GELOGD("model name is %s om name is %s op is %s in layer need dump", model.c_str(), om_name.c_str(), op_name.c_str());
   if (model_dump_properties_map_.find(DUMP_ALL_MODEL) != model_dump_properties_map_.end()) {
     return true;
   }
@@ -203,7 +204,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperti
 }
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpOpSwitch(
-  const std::string &dump_op_switch) {
+    const std::string &dump_op_switch) {
   dump_op_switch_ = dump_op_switch;
 }
 
@@ -270,4 +271,4 @@ void DumpProperties::SetDumpDebugOptions() {
     GELOGI("ge.exec.enableDumpDebug is false or is not set.");
   }
 }
-}  // namespace
+}  // namespace ge
diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc
index 0e17a15a..75396234 100755
--- a/ge/graph/load/model_manager/davinci_model.cc
+++ b/ge/graph/load/model_manager/davinci_model.cc
@@ -3221,9 +3221,8 @@ Status DavinciModel::DistributeTask() {
                        task_def.kernel_ex().op_index());
       OpDescPtr op = GetOpByIndex(op_index);
       GE_CHECK_NOTNULL(op);
-      if (reinterpret_cast(task->GetDumpArgs()) != nullptr) {
-        bool call_dump = GetDumpProperties().IsLayerNeedDump(name_, om_name_, op->GetName()) && task->CallSaveDumpInfo();
+      bool call_dump = OpNeedDump(op->GetName()) && task->CallSaveDumpInfo();
       if (call_dump || is_op_debug_reg_) {
         SaveDumpTask(task->GetTaskID(), task->GetStreamId(), op, task->GetDumpArgs());
       }
@@ -3243,11 +3242,16 @@ Status DavinciModel::DistributeTask() {
   return SUCCESS;
 }
 
-void DavinciModel::SetEndGraphId(uint32_t task_id, uint32_t stream_id) {
+bool DavinciModel::ModelNeedDump() {
   auto all_dump_model = GetDumpProperties().GetAllDumpModel();
-  bool findByOmName = all_dump_model.find(om_name_) != all_dump_model.end();
-  bool findByModelName = all_dump_model.find(name_) != all_dump_model.end();
-  if (all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end() || findByOmName || findByModelName) {
+  bool ret = all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end() ||
+             all_dump_model.find(dump_model_name_) != all_dump_model.end() ||
+             all_dump_model.find(om_name_) != all_dump_model.end();
+  return ret;
+}
+
+void DavinciModel::SetEndGraphId(uint32_t task_id, uint32_t stream_id) {
+  if (ModelNeedDump()) {
     GELOGI("start save end_graph_info to dumper, task_id is %u, stream_id is %u", task_id, stream_id);
     data_dumper_.SaveEndGraphId(task_id, stream_id);
   }
@@ -4107,7 +4111,10 @@ Status DavinciModel::TransAllVarData(ComputeGraphPtr &graph, uint32_t graph_id)
 }
 
 void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &graph, const map &variable_by_name) {
-  data_dumper_.SetModelName(name_);
+  if(dump_model_name_.empty()) {
+    dump_model_name_ = name_;
+  }
+  data_dumper_.SetModelName(dump_model_name_);
   data_dumper_.SetModelId(model_id_);
   data_dumper_.SetOmName(om_name_);
   data_dumper_.SetComputeGraph(graph);
@@ -4308,7 +4315,7 @@ int64_t DavinciModel::GetFixedAddrsSize(string tensor_name) {
 Status DavinciModel::InitL1DataDumperArgs() {
   auto all_dump_model = GetDumpProperties().GetAllDumpModel();
   bool find_by_om_name = all_dump_model.find(om_name_) != all_dump_model.end();
-  bool find_by_model_name = all_dump_model.find(name_) != all_dump_model.end();
+  bool find_by_model_name = all_dump_model.find(dump_model_name_) != all_dump_model.end();
   bool dump_l1fusion_op =
       (all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end()) || find_by_om_name || find_by_model_name;
   if (dump_l1fusion_op) {
diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h
index a83238b6..30240f25 100755
--- a/ge/graph/load/model_manager/davinci_model.h
+++ b/ge/graph/load/model_manager/davinci_model.h
@@ -248,7 +248,10 @@ class DavinciModel {
   string Name() const { return name_; }
 
   // om_name
-  string OmName() const { return om_name_; }
+  const string &OmName() const { return om_name_; }
+
+  // dump_model_name
+  const string &DumpModelName() const { return dump_model_name_; }
 
   // version
   uint32_t Version() const { return version_; }
@@ -483,6 +486,12 @@ class DavinciModel {
     data_dumper_.DumpShrink();
   }
 
+  bool OpNeedDump(const string &op_name) {
+    return GetDumpProperties().IsLayerNeedDump(dump_model_name_, om_name_, op_name);
+  }
+
+  bool ModelNeedDump();
+
   void SetEndGraphId(uint32_t task_id, uint32_t stream_id);
 
   DavinciModel &operator=(const DavinciModel &model) = delete;
@@ -542,6 +551,7 @@ class DavinciModel {
 
   // om file name
   void SetOmName(const string &om_name) { om_name_ = om_name; }
+  void SetDumpModelName(const string &dump_model_name) { dump_model_name_ = dump_model_name; }
 
   void SetDumpProperties(const DumpProperties &dump_properties) { data_dumper_.SetDumpProperties(dump_properties); }
   const DumpProperties &GetDumpProperties() const { return data_dumper_.GetDumpProperties(); }
@@ -888,6 +898,7 @@ class DavinciModel {
 
   // used for inference data dump
   string om_name_;
+  string dump_model_name_;
 
   uint32_t version_;
   GeModelPtr ge_model_;  // release after DavinciModel::Init
diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc
index 719975cc..0d920604 100755
--- a/ge/graph/load/model_manager/model_manager.cc
+++ b/ge/graph/load/model_manager/model_manager.cc
@@ -292,7 +292,7 @@ ge::Status ModelManager::SetDynamicSize(uint32_t model_id, const std::vector
-ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string &model_name,
+ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string &om_name,
                                                  const shared_ptr &ge_root_model,
                                                  const shared_ptr &listener) {
   auto hybrid_model = hybrid::HybridDavinciModel::Create(ge_root_model);
@@ -300,7 +300,7 @@ ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string
   hybrid_model->SetListener(listener);
   hybrid_model->SetModelId(model_id);
   hybrid_model->SetDeviceId(GetContext().DeviceId());
-  hybrid_model->SetModelName(model_name);
+  hybrid_model->SetOmName(om_name);
   GE_CHK_STATUS_RET(hybrid_model->Init(), "Failed to init hybrid model. model_id = %u", model_id);
   auto shared_model = std::shared_ptr(hybrid_model.release());
   InsertModel(model_id, shared_model);
@@ -332,9 +332,9 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr
   auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel();
-  string model_name = "";
+  string om_name;
   if (IsNeedHybridLoad(*ge_root_model)) {
-    return DoLoadHybridModelOnline(model_id, model_name, ge_root_model, listener);
+    return DoLoadHybridModelOnline(model_id, om_name, ge_root_model, listener);
   }
 
   mmTimespec timespec = mmGetTickCount();
diff --git a/ge/graph/load/model_manager/task_info/end_graph_task_info.cc b/ge/graph/load/model_manager/task_info/end_graph_task_info.cc
index 673ceb58..a8b042d3 100644
--- a/ge/graph/load/model_manager/task_info/end_graph_task_info.cc
+++ b/ge/graph/load/model_manager/task_info/end_graph_task_info.cc
@@ -46,10 +46,7 @@ Status EndGraphTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
 Status EndGraphTaskInfo::Distribute() {
   GELOGI("EndGraphTaskInfo Distribute Start.");
   GE_CHECK_NOTNULL(davinci_model_);
-  auto all_dump_model = davinci_model_->GetDumpProperties().GetAllDumpModel();
-  if (all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end() ||
-      all_dump_model.find(davinci_model_->Name()) != all_dump_model.end() ||
-      all_dump_model.find(davinci_model_->OmName()) != all_dump_model.end()) {
+  if (davinci_model_->ModelNeedDump()) {
     GELOGI("Start to call rtEndGraphEx");
     rtError_t rt_ret = rtEndGraphEx(model_, stream_, kDumpFlag);
     if (rt_ret != RT_ERROR_NONE) {
diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
index 2996d30b..2c0da343 100644
--- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
+++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
@@ -277,8 +277,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
 }
 
 void KernelExTaskInfo::InitDumpTask(void *addr, const OpDescPtr &op_desc) {
-  if (davinci_model_->GetDumpProperties().IsLayerNeedDump(davinci_model_->Name(), davinci_model_->OmName(),
-                                                          op_desc->GetName())) {
+  if (davinci_model_->OpNeedDump(op_desc->GetName())) {
     dump_flag_ = RT_KERNEL_DUMPFLAG;
     dump_args_ = addr;
   }
diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc
index 92f06930..ecbcb7a4 100755
--- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc
+++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc
@@ -446,10 +446,7 @@ Status KernelTaskInfo::Distribute() {
             call_skt, task_id_, skt_id_, skt_info.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
   // l1 fusion enable and env flag open (kCloseSkt for skt debug)
   bool open_dump = false;
-  auto all_dump_model = davinci_model_->GetDumpProperties().GetAllDumpModel();
-  if (all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end() ||
-      all_dump_model.find(davinci_model_->Name()) != all_dump_model.end() ||
-      all_dump_model.find(davinci_model_->OmName()) != all_dump_model.end()) {
+  if (davinci_model_->ModelNeedDump()) {
     open_dump = true;
   }
   if (call_skt && (env_flag != kCloseSkt) && !open_dump) {
@@ -1088,8 +1085,7 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
 }
 
 void KernelTaskInfo::InitDumpTask(uint32_t offset) {
-  if (davinci_model_->GetDumpProperties().IsLayerNeedDump(davinci_model_->Name(), davinci_model_->OmName(),
-                                                          op_desc_->GetName())) {
+  if (davinci_model_->OpNeedDump(op_desc_->GetName())) {
     if (IsL1FusionOp(op_desc_)) {
       dump_flag_ = RT_FUSION_KERNEL_DUMPFLAG;
     } else {
diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc
index a97336d9..060e8467 100644
--- a/ge/hybrid/executor/hybrid_model_async_executor.cc
+++ b/ge/hybrid/executor/hybrid_model_async_executor.cc
@@ -46,10 +46,6 @@ void HybridModelAsyncExecutor::SetModelId(uint32_t model_id) {
   model_id_ = model_id;
 }
 
-void HybridModelAsyncExecutor::SetModelName(const string &model_name) {
-  om_name_ = model_name;
-}
-
 Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr &data) {
   if (data_inputer_->Push(data) != SUCCESS) {
     REPORT_CALL_ERROR("E19999", "Data queue is full, please call again later when %s, model_id %u.",
diff --git a/ge/hybrid/executor/hybrid_model_async_executor.h b/ge/hybrid/executor/hybrid_model_async_executor.h
index 69d8a3f4..b6942b10 100644
--- a/ge/hybrid/executor/hybrid_model_async_executor.h
+++ b/ge/hybrid/executor/hybrid_model_async_executor.h
@@ -51,8 +51,6 @@ class HybridModelAsyncExecutor {
 
   void SetModelId(uint32_t model_id);
 
-  void SetModelName(const string &model_name);
-
   Status Stop();
 
   Status EnqueueData(const std::shared_ptr &data);
@@ -97,7 +95,6 @@ class HybridModelAsyncExecutor {
   std::map input_tensor_desc_;
   std::vector is_input_dynamic_;
   std::shared_ptr listener_;
-  string om_name_;
   DataDumper data_dumper_;
   bool is_op_debug_reg_ = false;
   OpdebugRegister op_debug_register_;
diff --git a/ge/hybrid/executor/worker/execution_engine.cc b/ge/hybrid/executor/worker/execution_engine.cc
index 8dfdb476..063ea447 100755
--- a/ge/hybrid/executor/worker/execution_engine.cc
+++ b/ge/hybrid/executor/worker/execution_engine.cc
@@ -211,31 +211,36 @@ Status NodeDoneCallback::DumpDynamicNode() {
     return PARAM_INVALID;
   }
   auto op_desc = node->GetOpDesc();
+  GE_CHECK_NOTNULL(graph_context_);
+  const HybridModel *model = graph_context_->model;
+  GE_CHECK_NOTNULL(model);
+  std::string dynamic_model_name = model->GetModelName();
+  std::string dynamic_om_name = model->GetOmName();
+  uint32_t model_id = model->GetModelId();
+  if(!context_->GetDumpProperties().IsLayerNeedDump(dynamic_model_name, dynamic_om_name, op_desc->GetName())) {
+    GELOGI("[%s] is not in dump list, no need dump", op_desc->GetName().c_str());
+    return SUCCESS;
+  }
+  dump_op_.SetDynamicModelInfo(dynamic_model_name, dynamic_om_name, model_id);
+
   auto stream = context_->GetStream();
   vector input_addrs;
   vector output_addrs;
   for (int i = 0; i < context_->NumInputs(); i++) {
     auto tensor_value = context_->GetInput(i);
     GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Get][Tensor] value is nullptr.");
-    uint64_t input_addr = reinterpret_cast<uint64_t>(tensor_value->GetData());
+    uintptr_t input_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
     input_addrs.emplace_back(input_addr);
   }
   for (int j = 0; j < context_->NumOutputs(); j++) {
     auto tensor_value = context_->GetOutput(j);
     GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Get][Tensor] value is nullptr.");
-    uint64_t output_addr = reinterpret_cast<uint64_t>(tensor_value->GetData());
+    uintptr_t output_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
     output_addrs.emplace_back(output_addr);
   }
 
   dump_op_.SetDumpInfo(context_->GetDumpProperties(), op_desc, input_addrs, output_addrs, stream);
 
-  GE_CHECK_NOTNULL(graph_context_);
-  const HybridModel *model = graph_context_->model;
-  GE_CHECK_NOTNULL(model);
-  std::string dynamic_model_name = model->GetModelName();
-  uint32_t model_id = model->GetModelId();
-  dump_op_.SetDynamicModelInfo(dynamic_model_name, model_id);
-
   void *loop_per_iter = nullptr;
   TensorValue *varible_loop_per_iter = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_PER_ITER);
   if (varible_loop_per_iter != nullptr) {
diff --git a/ge/hybrid/hybrid_davinci_model.cc b/ge/hybrid/hybrid_davinci_model.cc
index 430dfa85..c741fe7e 100755
--- a/ge/hybrid/hybrid_davinci_model.cc
+++ b/ge/hybrid/hybrid_davinci_model.cc
@@ -76,9 +76,8 @@ class HybridDavinciModel::Impl {
     executor_.SetDeviceId(device_id);
   }
 
-  void SetModelName(const string &model_name) {
-    model_.SetModelName(model_name);
-    executor_.SetModelName(model_name);
+  void SetOmName(const string &model_name) {
+    model_.SetOmName(model_name);
   }
 
   uint64_t GetSessionId() {
@@ -181,9 +180,9 @@ void HybridDavinciModel::SetDeviceId(uint32_t device_id) {
   }
 }
 
-void HybridDavinciModel::SetModelName(const string &model_name) {
+void HybridDavinciModel::SetOmName(const string &om_name) {
   if (impl_ != nullptr) {
-    impl_->SetModelName(model_name);
+    impl_->SetOmName(om_name);
   }
 }
diff --git a/ge/hybrid/hybrid_davinci_model.h b/ge/hybrid/hybrid_davinci_model.h
index 74dca9ed..3b3473ff 100644
--- a/ge/hybrid/hybrid_davinci_model.h
+++ b/ge/hybrid/hybrid_davinci_model.h
@@ -57,7 +57,7 @@ class HybridDavinciModel {
 
   void SetDeviceId(uint32_t device_id);
 
-  void SetModelName(const string &model_name);
+  void SetOmName(const string &om_name);
 
   uint64_t GetSessionId();
diff --git a/ge/hybrid/hybrid_davinci_model_stub.cc b/ge/hybrid/hybrid_davinci_model_stub.cc
index 5b10fb7a..67a7a101 100644
--- a/ge/hybrid/hybrid_davinci_model_stub.cc
+++ b/ge/hybrid/hybrid_davinci_model_stub.cc
@@ -61,7 +61,7 @@ void HybridDavinciModel::SetModelId(uint32_t model_id) {
 void HybridDavinciModel::SetDeviceId(uint32_t device_id) {
 }
 
-void HybridDavinciModel::SetModelName(const string &model_name) {
+void HybridDavinciModel::SetOmName(const string &om_name) {
 }
 
 uint64_t HybridDavinciModel::GetSessionId() {
diff --git a/ge/hybrid/model/hybrid_model.h b/ge/hybrid/model/hybrid_model.h
index 627ca732..ed1d092e 100644
--- a/ge/hybrid/model/hybrid_model.h
+++ b/ge/hybrid/model/hybrid_model.h
@@ -71,8 +71,8 @@ class HybridModel {
     model_id_ = model_id;
   }
 
-  void SetModelName(const string &model_name) {
-    om_name_ = model_name;
+  void SetOmName(const string &om_name) {
+    om_name_ = om_name;
   }
 
   const std::string &GetOmName() const {
diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
index 8922c5ed..0a575d16 100755
--- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
+++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
@@ -171,6 +171,7 @@ Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node
   // set known node flag as true
   davinci_model->SetKnownNode(true);
   davinci_model->SetId(model.GetModelId());
+  davinci_model->SetDumpModelName(model.GetModelName());
   davinci_model->SetOmName(model.GetOmName());
   // set model id as root node's node id
   davinci_model->SetSubModelId(node->GetOpDesc()->GetId());
diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index 54d5615d..0bc9a6e1 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ b/tests/ut/ge/CMakeLists.txt
@@ -167,6 +167,7 @@ set(COMMON_SRC_FILES
     "${GE_CODE_DIR}/ge/common/helper/model_helper.cc"
     "${GE_CODE_DIR}/ge/common/dump/dump_manager.cc"
     "${GE_CODE_DIR}/ge/common/dump/opdebug_register.cc"
+    "${GE_CODE_DIR}/ge/common/dump/dump_op.cc"
     "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc"
     "${GE_CODE_DIR}/ge/model/ge_root_model.cc"
     "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc"
@@ -752,6 +753,7 @@ set(MULTI_PARTS_TEST_FILES
     "graph/transop_util_unittest.cc"
    "common/datatype_transfer_unittest.cc"
    "common/dump_manager_unittest.cc"
+    "common/dump_op_unittest.cc"
    "common/opdebug_register_unittest.cc"
    "common/format_transfer_unittest.cc"
    "common/format_transfer_transpose_unittest.cc"
diff --git a/tests/ut/ge/common/dump_op_unittest.cc b/tests/ut/ge/common/dump_op_unittest.cc
new file mode 100644
index 00000000..9007ac95
--- /dev/null
+++ b/tests/ut/ge/common/dump_op_unittest.cc
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define protected public
+#define private public
+#include "common/dump/dump_op.h"
+#include "common/debug/log.h"
+#include "common/ge_inner_error_codes.h"
+#include "common/dump/dump_properties.h"
+#undef private
+#undef protected
+
+namespace ge {
+class UTEST_dump_op : public testing::Test {
+ protected:
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(UTEST_dump_op, launch_dump_op_success) {
+  DumpOp dump_op;
+  DumpProperties dump_properties;
+  OpDescPtr op_desc = std::make_shared<OpDesc>("GatherV2", "GatherV2");
+  std::set<std::string> temp;
+  dump_properties.model_dump_properties_map_.emplace("model1", temp);
+  dump_properties.enable_dump_ = "1";
+  dump_op.SetDynamicModelInfo("model1", "model2", 1);
+  dump_op.SetDumpInfo(dump_properties, op_desc, {}, {}, nullptr);
+  auto ret = dump_op.LaunchDumpOp();
+  EXPECT_EQ(ret, ge::SUCCESS);
+}
+
+TEST_F(UTEST_dump_op, launch_dump_op_success_2) {
+  DumpOp dump_op;
+  DumpProperties dump_properties;
+  OpDescPtr op_desc = std::make_shared<OpDesc>("GatherV2", "GatherV2");
+  std::set<std::string> temp;
+  dump_properties.model_dump_properties_map_.emplace("model1", temp);
+  dump_properties.enable_dump_ = "1";
+  dump_op.SetDynamicModelInfo("modle2", "model2", 1);
+  dump_op.SetDumpInfo(dump_properties, op_desc, {}, {}, nullptr);
+  auto ret = dump_op.LaunchDumpOp();
+  EXPECT_EQ(ret, ge::SUCCESS);
+}
+
+}  // namespace ge
\ No newline at end of file
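
For reference, the selection rule introduced by DumpOp::SetDumpModelName in this patch can be read in isolation: the om name is preferred when it appears in the configured dump list, the model name is used as a fallback, and the dump is skipped when neither name is configured and DUMP_ALL_MODEL is absent. Below is a minimal standalone sketch of that rule, not part of the patch; "ALL_MODEL" stands in for ge::DUMP_ALL_MODEL, and the real code additionally requires DumpProperties::IsDumpOpen() and a non-empty name before recording it in OpMappingInfo.

// Standalone illustration of the dump-model-name selection rule (hypothetical helper,
// mirrors DumpOp::SetDumpModelName from the patch above).
#include <iostream>
#include <set>
#include <string>

// Returns the name to record for dumping, or an empty string when the model is
// not in the dump list and the dump should be skipped.
std::string SelectDumpModelName(const std::set<std::string> &dump_list,
                                const std::string &model_name,
                                const std::string &om_name) {
  const bool found_by_om = dump_list.count(om_name) != 0;
  const bool found_by_model = dump_list.count(model_name) != 0;
  // Prefer the om name when it is configured, otherwise fall back to the model name.
  const std::string selected = found_by_om ? om_name : model_name;
  if (dump_list.count("ALL_MODEL") == 0 && !found_by_om && !found_by_model) {
    return "";  // neither name configured: caller skips the dump
  }
  return selected;
}

int main() {
  const std::set<std::string> dump_list = {"model1"};
  // Mirrors the two unit tests: "model1"/"model2" matches by model name,
  // while "modle2"/"model2" matches neither and the dump is skipped.
  std::cout << SelectDumpModelName(dump_list, "model1", "model2") << "\n";  // prints "model1"
  std::cout << SelectDumpModelName(dump_list, "modle2", "model2") << "\n";  // prints ""
  return 0;
}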