@@ -70,7 +70,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) {
     context_.profiler->Dump(std::cout);
     context_.profiler->Reset();
   }
-  root_graph_executor_->ResetContext();
+  root_graph_executor_->ReleaseContext();
   context_.iteration += 1;
   if (ret == END_OF_SEQUENCE) {
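The rename from ResetContext to ReleaseContext (the definition changes in the SubgraphExecutor hunk further down) is not cosmetic: `subgraph_context_.reset(nullptr)` destroys the owned context and leaves the pointer empty, it does not rebuild anything. A minimal sketch of those unique_ptr semantics, using a stand-in type rather than the real ge::hybrid::SubgraphContext:

```cpp
#include <iostream>
#include <memory>

// Stand-in for SubgraphContext; only the ownership behavior matters here.
struct SubgraphContext {
  ~SubgraphContext() { std::cout << "context destroyed\n"; }
};

int main() {
  std::unique_ptr<SubgraphContext> subgraph_context(new SubgraphContext());
  // reset(nullptr) releases the object immediately; nothing is re-created,
  // which is why "ReleaseContext" names the call better than "ResetContext".
  subgraph_context.reset(nullptr);
  std::cout << (subgraph_context == nullptr ? "now empty\n" : "still owning\n");
  return 0;
}
```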
@@ -177,6 +177,10 @@ struct NodeState {
   void SetTaskContext(std::shared_ptr<TaskContext> &task_context);
   std::shared_ptr<TaskContext> GetTaskContext();
 
+  void SetSkipInferShape(bool skip_infershape) { skip_infershape_ = skip_infershape; }
+
+  bool GetSkipInferShape() const { return skip_infershape_; }
+
 private:
   bool IsScheduleReady() const;
   void SetDataSchedule(const NodeState &node_state, const std::function<void(const NodeItem *)> &ready);
@@ -204,6 +208,7 @@ struct NodeState {
   int merge_index_ = -1;   // Use for Execute (Reset after Executed).
   int switch_index_ = -1;  // Use for Schedule (Reset after Prepared).
   int group_ = -1;
+  bool skip_infershape_ = false;
 };
 }  // namespace hybrid
 }  // namespace ge
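Taken together, the two NodeState hunks add a plain boolean with inline accessors, defaulting to false so every existing node keeps its current behavior. A toy round trip of the flag, condensed from the diff (not the full NodeState):

```cpp
#include <cassert>

// Condensed NodeState: only the new flag and its accessors.
class NodeState {
 public:
  void SetSkipInferShape(bool skip_infershape) { skip_infershape_ = skip_infershape; }
  bool GetSkipInferShape() const { return skip_infershape_; }

 private:
  bool skip_infershape_ = false;  // default: the node still gets shape inference
};

int main() {
  NodeState state;
  assert(!state.GetSkipInferShape());  // ordinary node: inference runs
  state.SetSkipInferShape(true);       // input node whose shapes were fed in
  assert(state.GetSkipInferShape());   // the engine will now skip it
  return 0;
}
```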
@@ -110,6 +110,7 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vector<TensorValue
       output_desc->SetShape(tensor_desc->GetShape());
       output_desc->SetOriginShape(tensor_desc->GetOriginShape());
       output_desc->SetDataType(tensor_desc->GetDataType());
+      node_state->SetSkipInferShape(true);
     }
   }
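Setting the flag here is safe because the input node's output descriptor has just been copied verbatim (shape, origin shape, data type) from the tensor the caller fed in, so running InferShapeAndType on it again could not produce new information. A simplified illustration with hypothetical types (the real code works on GeTensorDesc):

```cpp
#include <cstdint>
#include <vector>

// Hypothetical, simplified descriptor and node state.
struct TensorDesc {
  std::vector<int64_t> shape;
  int data_type = 0;
};

struct InputNodeState {
  TensorDesc output_desc;
  bool skip_infershape = false;
};

// Mirror of what the hunk above does: copy the fed descriptor, then mark the
// node so the shape-inference engine leaves it alone.
void InitInputForUnknownShape(InputNodeState &state, const TensorDesc &fed) {
  state.output_desc.shape = fed.shape;          // SetShape / SetOriginShape
  state.output_desc.data_type = fed.data_type;  // SetDataType
  state.skip_infershape = true;                 // nothing left to infer
}
```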
@@ -41,7 +41,7 @@ class SubgraphExecutor {
   Status PartialExecuteAsync(int task_group);
 
-  void ResetContext() { subgraph_context_.reset(nullptr); }
+  void ReleaseContext() { subgraph_context_.reset(nullptr); }
 
   /**
    * Execute subgraph async, output tensor address(not data) and output tensor descriptions are
@@ -70,7 +70,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) {
   // Do shape inference
   // Skipping infer shape of input node.
   GELOGD("[%s] Start to invoke InferShapeAndType", node_item.NodeName().c_str());
-  if (node_state.GetType() != DATA_TYPE && node_state.GetType() != AIPP_DATA_TYPE) {
+  if (!node_state.GetSkipInferShape()) {
     RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start");
     GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true),
                       "[Invoke][InferShapeAndType] for %s failed.", node_item.NodeName().c_str());
@@ -49,8 +49,8 @@ const uint32_t kOutputIndexOfData = 0;
 constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape";
 
 Status CheckHostMem(const std::vector<string> &dependencies, const NodePtr &node, bool &is_host_mem) {
+  auto op_desc = node->GetOpDesc();
   for (const auto &input_name : dependencies) {
-    auto op_desc = node->GetOpDesc();
     int input_index = op_desc->GetInputIndexByName(input_name);
     if (input_index < 0) {
       GELOGE(INTERNAL_ERROR, "[Get][InputIndex]failed, node:[%s] inputname: %s.",
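Hoisting node->GetOpDesc() out of the loop is routine loop-invariant code motion: the descriptor cannot change between iterations, so one call replaces one call per dependency. A toy demonstration with a call-counting stand-in for the getter:

```cpp
#include <cassert>
#include <string>
#include <vector>

// Stand-in node whose descriptor getter counts how often it is invoked,
// like node->GetOpDesc() inside CheckHostMem.
struct CountingNode {
  mutable int get_op_desc_calls = 0;
  std::string GetOpDesc() const {
    ++get_op_desc_calls;
    return "op_desc";
  }
};

int main() {
  CountingNode node;
  const std::vector<std::string> dependencies{"x", "y", "z"};
  const auto op_desc = node.GetOpDesc();  // hoisted: fetched exactly once
  for (const auto &input_name : dependencies) {
    assert(!op_desc.empty() && !input_name.empty());  // loop body uses both
  }
  assert(node.get_op_desc_calls == 1);  // before the hoist this would be 3
  return 0;
}
```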
@@ -60,11 +60,7 @@ Status CheckHostMem(const std::vector<string> &dependencies, const NodePtr &node
       return INTERNAL_ERROR;
     }
 
-    const auto &in_anchor = node->GetInDataAnchor(input_index);
-    GE_CHECK_NOTNULL(in_anchor);
-    const auto &peer_out_anchor = in_anchor->GetPeerOutAnchor();
-    GE_CHECK_NOTNULL(peer_out_anchor);
-    const auto &src_node = peer_out_anchor->GetOwnerNode();
+    const auto &src_node = NodeUtils::GetInDataNodeByIndex(*node, input_index);
     GE_CHECK_NOTNULL(src_node);
     auto src_op_desc = src_node->GetOpDesc();
     GE_CHECK_NOTNULL(src_op_desc);
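NodeUtils::GetInDataNodeByIndex collapses the three null-checked hops (input anchor, its peer output anchor, that anchor's owner node) into one call; the caller keeps a single GE_CHECK_NOTNULL on the result. A sketch of the walk the helper is presumed to encapsulate, using minimal stand-in types rather than the real GE graph classes:

```cpp
#include <memory>
#include <vector>

// Minimal stand-ins (assumptions, not the real ge::Node/anchor classes).
struct Node;
using NodePtr = std::shared_ptr<Node>;

struct OutDataAnchor {
  NodePtr owner;
  NodePtr GetOwnerNode() const { return owner; }
};

struct InDataAnchor {
  std::shared_ptr<OutDataAnchor> peer;
  std::shared_ptr<OutDataAnchor> GetPeerOutAnchor() const { return peer; }
};

struct Node {
  std::vector<std::shared_ptr<InDataAnchor>> in_anchors;
  std::shared_ptr<InDataAnchor> GetInDataAnchor(int index) const {
    const bool in_range = index >= 0 && index < static_cast<int>(in_anchors.size());
    return in_range ? in_anchors[index] : nullptr;
  }
};

// The whole anchor walk, with every hop null-checked; any broken link maps
// to nullptr, which the caller still verifies.
NodePtr GetInDataNodeByIndex(const Node &node, int index) {
  const auto in_anchor = node.GetInDataAnchor(index);
  if (in_anchor == nullptr) {
    return nullptr;
  }
  const auto peer_out_anchor = in_anchor->GetPeerOutAnchor();
  if (peer_out_anchor == nullptr) {
    return nullptr;
  }
  return peer_out_anchor->GetOwnerNode();
}
```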
@@ -213,7 +213,7 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) {
   // make graph
   ut::GraphBuilder builder = ut::GraphBuilder("graph");
-  auto data = builder.AddNode("Data", "Data", 0, 1);
+  auto data = builder.AddNode("Data", "Data", 1, 1);
   auto transdata = builder.AddNode("Transdata", "Transdata", 1, 1);
   auto netoutput = builder.AddNode("Netoutput", "NetOutput", 1, 0);
   builder.AddDataEdge(data, 0, transdata, 0);
@@ -228,11 +228,6 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) {
   op_desc->SetOpInferDepends(depend_names);
   (void)AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true);
 
-  auto tensor = std::make_shared<GeTensor>();
-  auto data_desc = data->GetOpDesc();
-  auto tensor_desc = data_desc->MutableInputDesc(0);
-  AttrUtils::SetTensor(tensor_desc, "_value", tensor);
-
   // set task_def
   auto model_task_def = make_shared<domi::ModelTaskDef>();
   domi::TaskDef *task_def = model_task_def->add_task();
@@ -249,6 +244,12 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) {
   op_desc->impl_->input_name_idx_["Data"] = 0;
   model.BuildDynamicOp(res, dynamic_single_op);
+
+  auto tensor = std::make_shared<GeTensor>();
+  auto data_desc = data->GetOpDesc();
+  auto tensor_desc = data_desc->MutableInputDesc(0);
+  AttrUtils::SetTensor(tensor_desc, "_value", tensor);
+  model.BuildDynamicOp(res, dynamic_single_op);
 }
 
 TEST_F(UtestSingleOpModel, test_host_mem) {
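With the Data node now built with one input (1, 1 instead of 0, 1), MutableInputDesc(0) is valid and the test can attach the "_value" attribute to the input descriptor. Moving the attribute setup after the first call means BuildDynamicOp is exercised twice: once before "_value" exists and once after, covering both the host-memory dependency path and the stored-value path. A hedged GTest sketch of that two-pass pattern (TwoPathBuilder is a hypothetical stand-in, not the real model builder):

```cpp
#include <gtest/gtest.h>

// Hypothetical stand-in: one entry point whose behavior forks on whether a
// "_value"-style attribute is present, as BuildDynamicOp's dependency
// resolution does in the test above.
struct TwoPathBuilder {
  bool has_value_attr = false;
  int Build() const { return has_value_attr ? 1 : 0; }  // two distinct paths
};

TEST(TwoPassPattern, ExercisesBothBranches) {
  TwoPathBuilder builder;
  EXPECT_EQ(builder.Build(), 0);  // first pass: no "_value", host-mem path
  builder.has_value_attr = true;  // attach the attribute, as the test does
  EXPECT_EQ(builder.Build(), 1);  // second pass: value-backed path
}
```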