@@ -136,8 +136,7 @@ Status KnownNodeTask::Init(TaskContext &context) {
 Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *weight_buffer) {
   GELOGD("[Init][DavinciModel] start");
   davinci_model_->InitRuntimeParams();
-  GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(),
-                    "[Init][VariableMem] failed");
+  GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(), "[Init][VariableMem] failed");
   int32_t device_id = 0;
   GE_CHK_RT_RET(rtGetDevice(&device_id));
   davinci_model_->SetDeviceId(static_cast<uint32_t>(device_id));
@@ -181,7 +180,7 @@ Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons
 }
 Status KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr &node,
-                                        std::shared_ptr<DavinciModel> &davinci_model) const {
+                                         std::shared_ptr<DavinciModel> &davinci_model) const {
   // set known node flag as true
   davinci_model->SetKnownNode(true);
   davinci_model->SetId(model.GetModelId());