Browse Source

!1897 aicpu op

Merge pull request !1897 from 陈华/aicpu
tags/v1.5.1
i-robot Gitee 3 years ago
parent
commit
66a63a7302
23 changed files with 1396 additions and 26 deletions
  1. +4
    -0
      ge/engine_manager/dnnengine_manager.cc
  2. +52
    -0
      ge/graph/load/model_manager/davinci_model.cc
  3. +6
    -0
      ge/graph/load/model_manager/davinci_model.h
  4. +107
    -2
      ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
  5. +8
    -0
      ge/graph/load/model_manager/task_info/kernel_ex_task_info.h
  6. +103
    -3
      ge/graph/load/model_manager/task_info/kernel_task_info.cc
  7. +8
    -0
      ge/graph/load/model_manager/task_info/kernel_task_info.h
  8. +30
    -0
      ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc
  9. +5
    -0
      ge/hybrid/node_executor/aicpu/aicpu_ext_info.h
  10. +121
    -0
      ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
  11. +9
    -1
      ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
  12. +117
    -0
      ge/single_op/task/op_task.cc
  13. +7
    -0
      ge/single_op/task/op_task.h
  14. +82
    -11
      tests/depends/runtime/src/runtime_stub.cc
  15. +70
    -0
      tests/depends/runtime/src/runtime_stub.h
  16. +1
    -0
      tests/ut/ge/CMakeLists.txt
  17. +139
    -2
      tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
  18. +138
    -2
      tests/ut/ge/graph/load/kernel_task_info_unittest.cc
  19. +224
    -3
      tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
  20. +129
    -2
      tests/ut/ge/single_op/single_op_task_unittest.cc
  21. +16
    -0
      third_party/fwkacllib/inc/cce/fwk_adpt_struct.h
  22. +8
    -0
      third_party/fwkacllib/inc/runtime/config.h
  23. +12
    -0
      third_party/fwkacllib/inc/runtime/dev.h

+ 4
- 0
ge/engine_manager/dnnengine_manager.cc View File

@@ -239,6 +239,10 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {
op_desc->SetOpEngineName(it.engine);
op_desc->SetOpKernelLibName(kernel_name);
// set attrs for taking information when load txt to graph object
if (it.flagAsync) {
GELOGD("Set aicpu blocking op:%s attribute(is_blocking_op):true", op_desc->GetName().c_str());
(void)AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
}
(void) AttrUtils::SetStr(op_desc, ATTR_NAME_ENGINE_NAME_FOR_LX, it.engine);
(void) AttrUtils::SetStr(op_desc, ATTR_NAME_KKERNEL_LIB_NAME_FOR_LX, kernel_name);
GELOGD("DNNEngineManager:Set OpKernelLibName %s and engine name %s to op_desc %s", kernel_name.c_str(),


+ 52
- 0
ge/graph/load/model_manager/davinci_model.cc View File

@@ -238,6 +238,12 @@ DavinciModel::~DavinciModel() {
GE_LOGW_IF(rtEventDestroy(event_list_[i]) != RT_ERROR_NONE, "Destroy event failed, index: %zu", i);
}

for (const auto &it : stream_2_event_) {
if (rtEventDestroy(it.second) != RT_ERROR_NONE) {
GELOGW("Destroy event failed");
}
}

FreeWeightsMem();

FreeFeatureMapMem();
@@ -4648,4 +4654,50 @@ Status DavinciModel::GetTotalMemSizeExcludeZeroCopy(int64_t &total_useful_size)
total_useful_size = runtime_param_.mem_size - runtime_param_.zero_copy_size;
return SUCCESS;
}

/// @brief Get (or lazily create) the runtime event bound to @p stream and return its event id.
///        Events are cached in stream_2_event_ so each stream allocates at most one event;
///        cached events are destroyed in ~DavinciModel().
/// @param op_desc   op requesting the event (used for error reporting only)
/// @param stream    stream the blocking aicpu op is launched on
/// @param event_id  [out] runtime id of the event bound to the stream
/// @return SUCCESS, or a runtime-derived status on rt API failure
Status DavinciModel::GetEventIdForBlockingAicpuOp(const OpDescPtr &op_desc, rtStream_t stream, uint32_t &event_id) {
  GELOGI("Get event id for aicpu blocking op:%s", op_desc->GetName().c_str());
  auto it = stream_2_event_.find(stream);
  if (it != stream_2_event_.end()) {
    auto rt_ret = rtGetEventID(it->second, &event_id);
    if (rt_ret != RT_ERROR_NONE) {
      REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed for op:%s(%s), ret:0x%X",
                        op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret);
      GELOGE(RT_FAILED, "[Call][rtGetEventID] failed for op:%s(%s), ret:0x%X",
             op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
  } else {
    rtEvent_t rt_event = nullptr;
    auto rt_ret = rtEventCreateWithFlag(&rt_event, RT_EVENT_WITH_FLAG);
    if (rt_ret != RT_ERROR_NONE) {
      REPORT_CALL_ERROR("E19999", "Call rtEventCreateWithFlag failed for op:%s(%s), ret:0x%X",
                        op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret);
      GELOGE(RT_FAILED, "[Call][rtEventCreateWithFlag] failed for op:%s(%s), ret:0x%X",
             op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
    rt_ret = rtGetEventID(rt_event, &event_id);
    if (rt_ret != RT_ERROR_NONE) {
      REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed for op:%s(%s), ret:0x%X",
                        op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret);
      GELOGE(RT_FAILED, "[Call][rtGetEventID] failed for op:%s(%s), ret:0x%X",
             op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret);
      // Fix: the event was created but never cached; destroy it so it does not leak.
      (void)rtEventDestroy(rt_event);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
    stream_2_event_.emplace(stream, rt_event);
  }
  return SUCCESS;
}

/// @brief Look up the event previously cached for @p stream (by GetEventIdForBlockingAicpuOp).
/// @param stream    stream whose event is requested
/// @param rt_event  [out] cached event handle
/// @return SUCCESS when found, FAILED when no event is bound to the stream
Status DavinciModel::GetEventByStream(const rtStream_t &stream, rtEvent_t &rt_event) {
  const auto iter = stream_2_event_.find(stream);
  if (iter == stream_2_event_.end()) {
    REPORT_INNER_ERROR("E19999", "Get event failed");
    GELOGE(FAILED, "[Get][Event] Get event failed");
    return FAILED;
  }
  rt_event = iter->second;
  return SUCCESS;
}
} // namespace ge

+ 6
- 0
ge/graph/load/model_manager/davinci_model.h View File

@@ -582,6 +582,10 @@ class DavinciModel {
void SetRunningFlag(bool flag) { running_flg_ = flag; }
Status SetRunAsyncListenerCallback(const RunAsyncCallback &callback);

// for blocking aicpu op
Status GetEventByStream(const rtStream_t &stream, rtEvent_t &rt_event);
Status GetEventIdForBlockingAicpuOp(const OpDescPtr &op_desc, rtStream_t stream, uint32_t &event_id);

private:
// memory address of weights
uint8_t *weights_mem_base_;
@@ -1107,6 +1111,8 @@ class DavinciModel {

// op name to attrs mapping
std::map<std::string, std::map<std::string, std::vector<std::string>>> op_name_to_attrs_;

std::map<rtStream_t, rtEvent_t> stream_2_event_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_

+ 107
- 2
ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc View File

@@ -26,8 +26,8 @@
#include "external/graph/attr_value.h"
#include "graph/load/model_manager/davinci_model.h"
#include "graph/load/model_manager/model_manager.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "framework/common/debug/log.h"
#include "runtime/rt.h"

namespace {
const char *const kAicpuAllshape = "_AllShape";
@@ -43,7 +43,7 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe
UnknowShapeOpType unknown_type = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
uint32_t num_inputs = op_desc->GetInputsSize();
uint32_t num_outputs = op_desc->GetOutputsSize();
std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> ext_handle(
std::shared_ptr<ge::hybrid::AicpuExtInfoHandler> ext_handle(
new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc->GetName(),
num_inputs,
num_outputs,
@@ -76,6 +76,16 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe
}
}
}

AttrUtils::GetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_);
GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_);

if (UpdateEventIdForAicpuBlockingOp(op_desc, ext_handle) != SUCCESS) {
GELOGE(FAILED, "[Call][UpdateEventIdForAicpuBlockingOp] failed for op:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}

auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", ext_info.size(), rt_ret);
@@ -448,6 +458,101 @@ Status KernelExTaskInfo::Distribute() {
stream_id_ = stream_id;

GELOGI("KernelExTaskInfo Distribute Success. task id: %u, stream id: %u", task_id_, stream_id_);
if (is_blocking_aicpu_op_) {
if (DistributeWaitTaskForAicpuBlockingOp() != SUCCESS) {
GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
return FAILED;
}
}
return SUCCESS;
}

/// @brief Query whether the current device supports the blocking-aicpu-op feature.
/// @param is_support  [out] true iff the device reports RT_AICPU_BLOCKING_OP_SUPPORT
/// @return SUCCESS, FAILED on an out-of-range capability value, or a runtime-derived
///         status on rt API failure
Status KernelExTaskInfo::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) {
  int32_t device_id = 0;
  auto rt_ret = rtGetDevice(&device_id);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  int32_t value = 0;
  rt_ret = rtGetDeviceCapability(device_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &value);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  // The capability must be one of the two documented values; anything else is an error.
  if (value != RT_AICPU_BLOCKING_OP_NOT_SUPPORT && value != RT_AICPU_BLOCKING_OP_SUPPORT) {
    REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d",
                       RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value);
    GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d",
           RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value);
    return FAILED;
  }
  // Idiom fix: the comparison already yields a bool, no ternary needed.
  is_support = (value == RT_AICPU_BLOCKING_OP_SUPPORT);
  return SUCCESS;
}

/// @brief For a blocking aicpu op, fetch the stream-bound event id from the model
///        and write it into the aicpu ext-info handle. No-op for non-blocking ops
///        or when the device lacks the blocking-op feature.
Status KernelExTaskInfo::UpdateEventIdForAicpuBlockingOp(const OpDescPtr &op_desc,
                                                         std::shared_ptr<ge::hybrid::AicpuExtInfoHandler> &ext_handle) {
  // Guard clause: nothing to do unless this op was flagged as blocking.
  if (!is_blocking_aicpu_op_) {
    return SUCCESS;
  }
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("Device not support blocking aicpu op process");
    return SUCCESS;
  }
  uint32_t event_id = 0;
  const Status get_ret = davinci_model_->GetEventIdForBlockingAicpuOp(op_desc, stream_, event_id);
  if (get_ret != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Get event id failed for op:%s(%s).", op_desc->GetName().c_str(),
                      op_desc->GetType().c_str());
    GELOGE(FAILED, "[Get][EventId] Get event id failed for op:%s(%s)", op_desc->GetName().c_str(),
           op_desc->GetType().c_str());
    return FAILED;
  }
  const Status update_ret = ext_handle->UpdateEventId(event_id);
  if (update_ret != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Update event id failed for op:%s(%s).", op_desc->GetName().c_str(),
                      op_desc->GetType().c_str());
    GELOGE(FAILED, "[Update][EventId] Update event id failed for op:%s(%s)", op_desc->GetName().c_str(),
           op_desc->GetType().c_str());
    return FAILED;
  }
  GELOGI("Update event_id=%u success", event_id);
  return SUCCESS;
}

/// @brief After launching a blocking aicpu kernel, queue a wait on the stream-bound
///        event and then reset it, so the stream blocks until the kernel signals.
///        No-op when the device lacks the blocking-op feature.
Status KernelExTaskInfo::DistributeWaitTaskForAicpuBlockingOp() {
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("Device not support blocking aicpu op process.");
    return SUCCESS;
  }
  GELOGD("Distribute wait task begin");
  rtEvent_t event = nullptr;
  if (davinci_model_->GetEventByStream(stream_, event) != SUCCESS) {
    GELOGE(FAILED, "[Call][GetEventByStream] Call GetEventByStream failed");
    return FAILED;
  }
  // Order matters: wait must be enqueued before the reset on the same stream.
  auto status = rtStreamWaitEvent(stream_, event);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  status = rtEventReset(event, stream_);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  return SUCCESS;
}



+ 8
- 0
ge/graph/load/model_manager/task_info/kernel_ex_task_info.h View File

@@ -19,6 +19,7 @@

#include "graph/load/model_manager/task_info/task_info.h"
#include "graph/op_desc.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"

namespace ge {
class KernelExTaskInfo : public TaskInfo {
@@ -65,6 +66,12 @@ class KernelExTaskInfo : public TaskInfo {
void InitDumpArgs(void *addr, const OpDescPtr &op_desc);
Status InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc);

// for blocking aicpu op
Status DistributeWaitTaskForAicpuBlockingOp();
Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);
Status UpdateEventIdForAicpuBlockingOp(const OpDescPtr &op_desc,
std::shared_ptr<ge::hybrid::AicpuExtInfoHandler> &ext_handle);

uint32_t task_id_;
uint32_t stream_id_;
uint32_t dump_flag_;
@@ -79,6 +86,7 @@ class KernelExTaskInfo : public TaskInfo {
uint32_t args_offset_ = 0;
int64_t fixed_addr_offset_ = 0;
int32_t topic_type_flag_ = -1;
bool is_blocking_aicpu_op_ = false;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_EX_TASK_INFO_H_

+ 103
- 3
ge/graph/load/model_manager/task_info/kernel_task_info.cc View File

@@ -28,11 +28,10 @@
#include "graph/load/model_manager/davinci_model.h"
#include "graph/load/model_manager/model_manager.h"
#include "graph/load/model_manager/model_utils.h"
#include "runtime/kernel.h"
#include "runtime/rt.h"
#include "graph/load/model_manager/task_info/super_kernel/super_kernel.h"
#include "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h"
#include "cce/aicpu_engine_struct.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "framework/common/debug/log.h"

namespace {
@@ -474,6 +473,12 @@ Status KernelTaskInfo::Distribute() {
}
// set for task_id_
UpdateTaskId();
if (is_blocking_aicpu_op_) {
if (DistributeWaitTaskForAicpuBlockingOp() != SUCCESS) {
GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
return FAILED;
}
}
GELOGD(
"KernelTaskInfo Distribute Success. sktenable:%d taskid:%d sktid:%d stubfunc_name:%s stubfunc:%p "
"blockdim:%d stream:%p",
@@ -482,6 +487,91 @@ Status KernelTaskInfo::Distribute() {
return SUCCESS;
}

/// @brief Query whether the current device supports the blocking-aicpu-op feature.
/// @param is_support  [out] true iff the device reports RT_AICPU_BLOCKING_OP_SUPPORT
/// @return SUCCESS, FAILED on an out-of-range capability value, or a runtime-derived
///         status on rt API failure
Status KernelTaskInfo::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) {
  int32_t device_id = 0;
  auto rt_ret = rtGetDevice(&device_id);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  int32_t value = 0;
  rt_ret = rtGetDeviceCapability(device_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &value);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  // The capability must be one of the two documented values; anything else is an error.
  if (value != RT_AICPU_BLOCKING_OP_NOT_SUPPORT && value != RT_AICPU_BLOCKING_OP_SUPPORT) {
    REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d",
                       RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value);
    GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d",
           RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value);
    return FAILED;
  }
  // Idiom fix: the comparison already yields a bool, no ternary needed.
  is_support = (value == RT_AICPU_BLOCKING_OP_SUPPORT);
  return SUCCESS;
}

/// @brief For a blocking aicpu op, fetch the stream-bound event id from the model
///        and write it into the aicpu ext-info handle. No-op for non-blocking ops
///        or when the device lacks the blocking-op feature.
Status KernelTaskInfo::UpdateEventIdForAicpuBlockingOp(std::shared_ptr<ge::hybrid::AicpuExtInfoHandler> &ext_handle) {
  // Guard clause: nothing to do unless this op was flagged as blocking.
  if (!is_blocking_aicpu_op_) {
    return SUCCESS;
  }
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("Device not support blocking aicpu op process");
    return SUCCESS;
  }
  uint32_t event_id = 0;
  if (davinci_model_->GetEventIdForBlockingAicpuOp(op_desc_, stream_, event_id) != SUCCESS) {
    GELOGE(FAILED, "[Get][EventId] Get event id failed for op:%s(%s)", op_desc_->GetName().c_str(),
           op_desc_->GetType().c_str());
    return FAILED;
  }
  if (ext_handle->UpdateEventId(event_id) != SUCCESS) {
    GELOGE(FAILED, "[Update][EventId] Update event id failed for op:%s(%s)", op_desc_->GetName().c_str(),
           op_desc_->GetType().c_str());
    return FAILED;
  }
  GELOGI("Update event_id=%u success", event_id);
  return SUCCESS;
}

/// @brief After launching a blocking aicpu kernel, queue a wait on the stream-bound
///        event and then reset it, so the stream blocks until the kernel signals.
///        No-op when the device lacks the blocking-op feature.
Status KernelTaskInfo::DistributeWaitTaskForAicpuBlockingOp() {
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("device not support blocking aicpu op process.");
    return SUCCESS;
  }
  GELOGD("Distribute wait task begin");
  rtEvent_t event = nullptr;
  if (davinci_model_->GetEventByStream(stream_, event) != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Call GetEventByStream failed");
    GELOGE(FAILED, "[Call][GetEventByStream] Call GetEventByStream failed");
    return FAILED;
  }
  // Order matters: wait must be enqueued before the reset on the same stream.
  auto status = rtStreamWaitEvent(stream_, event);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  status = rtEventReset(event, stream_);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  return SUCCESS;
}

void KernelTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
@@ -1109,7 +1199,7 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
UnknowShapeOpType unknown_type = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
uint32_t num_inputs = op_desc_->GetInputsSize();
uint32_t num_outputs = op_desc_->GetOutputsSize();
std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> ext_handle(
std::shared_ptr<ge::hybrid::AicpuExtInfoHandler> ext_handle(
new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(),
num_inputs,
num_outputs,
@@ -1145,6 +1235,16 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
j, op_desc_->GetName().c_str());
}
}

AttrUtils::GetBool(op_desc_, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_);
GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc_->GetName().c_str(), is_blocking_aicpu_op_);

if (UpdateEventIdForAicpuBlockingOp(ext_handle) != SUCCESS) {
GELOGE(FAILED, "[Call][UpdateEventIdForAicpuBlockingOp] failed for op:%s(%s)",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
return FAILED;
}

auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%zu, ret:0x%X",


+ 8
- 0
ge/graph/load/model_manager/task_info/kernel_task_info.h View File

@@ -24,6 +24,8 @@

#include "graph/load/model_manager/task_info/task_info.h"
#include "graph/op_desc.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"

namespace ge {
class KernelTaskInfo : public TaskInfo {
public:
@@ -148,6 +150,11 @@ class KernelTaskInfo : public TaskInfo {
bool DoubleCallSKTSaveCheck();
void SetArgs();

// for blocking aicpu op
Status DistributeWaitTaskForAicpuBlockingOp();
Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);
Status UpdateEventIdForAicpuBlockingOp(std::shared_ptr<ge::hybrid::AicpuExtInfoHandler> &ext_handle);

void *stub_func_;
void *args_;
void *sm_desc_;
@@ -187,6 +194,7 @@ class KernelTaskInfo : public TaskInfo {
uint32_t skt_dump_flag_ = RT_KERNEL_DEFAULT;
void *superkernel_device_args_addr_ = nullptr;
void *superkernel_dev_nav_table_ = nullptr;
bool is_blocking_aicpu_op_ = false;

struct AICPUCustomInfo {
void *input_descs = nullptr;


+ 30
- 0
ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc View File

@@ -81,6 +81,9 @@ Status AicpuExtInfoHandler::Parse(const std::string &ext_info) {
case aicpu::FWKAdapter::FWK_ADPT_EXT_TOPIC_TYPE:
GE_CHK_STATUS_RET(ParseExtTopicType(aicpu_ext_info), "[Parse][ExtTopicType] failed.");
break;
case aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT:
GE_CHK_STATUS_RET(ParseExtAsyncWait(aicpu_ext_info), "[Parse][ExtAsyncWait] failed.");
break;
default:
GELOGD("Node[%s] ignore infoType=%d, infoLen=%u.",
node_name_.c_str(), aicpu_ext_info->infoType, aicpu_ext_info->infoLen);
@@ -101,6 +104,22 @@ Status AicpuExtInfoHandler::Parse(const std::string &ext_info) {
return SUCCESS;
}

// Parse the FWK_ADPT_EXT_ASYNCWAIT section of the aicpu ext info: validate the
// payload length and keep a pointer into the ext-info buffer so UpdateEventId()
// can later write the wait type/id in place.
// Returns ACL_ERROR_GE_PARAM_INVALID when infoLen does not match sizeof(AsyncWaitInfo).
Status AicpuExtInfoHandler::ParseExtAsyncWait(AicpuExtInfo *aicpu_ext_info) {
if (aicpu_ext_info->infoLen != sizeof(AsyncWaitInfo)) {
REPORT_INNER_ERROR("E19999",
"Node[%s] parse ext async wait info failed as infoLen must be %zu but %u.",
node_name_.c_str(), sizeof(AsyncWaitInfo), aicpu_ext_info->infoLen);
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"[Check][DataLen]Node[%s] parse ext async wait info failed as infoLen must be %zu but %u.",
node_name_.c_str(), sizeof(AsyncWaitInfo), aicpu_ext_info->infoLen);
return ACL_ERROR_GE_PARAM_INVALID;
}

// async_wait_ aliases the ext-info buffer; it stays valid as long as the handler's
// underlying ext info storage is alive.
async_wait_ = reinterpret_cast<AsyncWaitInfo *>(aicpu_ext_info->infoMsg);
GELOGI("Node[%s] parse async wait info success infoLen=%u.", node_name_.c_str(), aicpu_ext_info->infoLen);
return SUCCESS;
}

Status AicpuExtInfoHandler::ParseExtShapeType(AicpuExtInfo *aicpu_ext_info) {
GE_IF_BOOL_EXEC(aicpu_ext_info->infoLen != sizeof(int32_t),
REPORT_INNER_ERROR("E19999", "Node[%s] parse ext shape type failed as infoLen must be %zu but %u.",
@@ -280,6 +299,17 @@ Status AicpuExtInfoHandler::UpdateSessionInfo(uint64_t session_id, uint64_t kern
return SUCCESS;
}

// Write the wait type/id of a blocking aicpu op into the parsed async-wait section.
// Requires that ParseExtAsyncWait() already located the AsyncWait payload; fails
// otherwise.
Status AicpuExtInfoHandler::UpdateEventId(uint32_t event_id) {
if (async_wait_ == nullptr) {
REPORT_INNER_ERROR("E19999", "async_wait_ is nullptr.");
GELOGE(FAILED, "[Check][async_wait_] async_wait_ is nullptr.");
return FAILED;
}
// NOTE(review): waitType 1 appears to mean "wait on event" per the fwk adapter's
// AsyncWait struct — confirm against fwk_adpt_struct.h before relying on it.
async_wait_->waitType = 1;
async_wait_->waitId = event_id;
return SUCCESS;
}

Status AicpuExtInfoHandler::UpdateSessionInfoSessionId(uint64_t session_id) {
if (session_info_ == nullptr) {
GELOGD("There is no session info in ext_info, no need update.");


+ 5
- 0
ge/hybrid/node_executor/aicpu/aicpu_ext_info.h View File

@@ -27,6 +27,7 @@ namespace ge {
namespace hybrid {
using AicpuShapeAndType = aicpu::FWKAdapter::ShapeAndType;
using AicpuExtInfo = aicpu::FWKAdapter::ExtInfo;
using AsyncWaitInfo = aicpu::FWKAdapter::AsyncWait;
using AicpuSessionInfo = SessionInfo;

class AicpuExtInfoHandler {
@@ -59,6 +60,8 @@ class AicpuExtInfoHandler {

Status UpdateExecuteMode(bool flag);

Status UpdateEventId(uint32_t event_id);

Status GetOutputShapeAndType(uint32_t output_index, GeShape &shape, DataType &data_type);

bool IsNeedRefreshIOAddr();
@@ -73,6 +76,7 @@ class AicpuExtInfoHandler {
Status ParseExtBitMap(AicpuExtInfo *aicpu_ext_info);
Status ParseExtUpdateAddr(AicpuExtInfo *aicpu_ext_info);
Status ParseExtTopicType(AicpuExtInfo *aicpu_ext_info);
Status ParseExtAsyncWait(AicpuExtInfo *aicpu_ext_info);

static Status UpdateShapeAndType(const GeShape &shape,
DataType data_type,
@@ -90,6 +94,7 @@ class AicpuExtInfoHandler {
const uint32_t output_num_;
UnknowShapeOpType unknown_type_;
AicpuSessionInfo *session_info_ = nullptr;
AsyncWaitInfo *async_wait_ = nullptr;
uint64_t *bit_map_ = nullptr;
uint32_t *update_addr_ = nullptr;
int32_t topic_type_flag_ = -1;


+ 121
- 0
ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc View File

@@ -22,6 +22,7 @@
#include "graph/utils/node_utils.h"
#include "hybrid/executor/hybrid_execution_context.h"
#include "hybrid/model/hybrid_model.h"
#include "runtime/rt.h"

namespace ge {
namespace hybrid {
@@ -33,6 +34,12 @@ const char *const kAicpuAllshape = "_AllShape";
REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_TF, AiCpuNodeExecutor);
REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_CUSTOM, AiCpuNodeExecutor);

// Release the event created for a blocking aicpu op (if any); best-effort, the
// destroy result is intentionally ignored.
AicpuNodeTaskBase::~AicpuNodeTaskBase() {
  if (rt_event_ == nullptr) {
    return;
  }
  (void)rtEventDestroy(rt_event_);
}

Status AicpuNodeTaskBase::AllocTensorBuffer(size_t size, std::unique_ptr<TensorBuffer> &tensor_buffer) {
auto allocator = NpuMemoryAllocator::GetAllocator();
GE_CHECK_NOTNULL(allocator);
@@ -64,6 +71,13 @@ Status AicpuNodeTaskBase::InitExtInfo(const std::string &kernel_ext_info, int64_
GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateSessionInfoSessionId(session_id),
"[Update][SessionInfoSessionId] failed, session_id:%ld.", session_id);

if (is_blocking_aicpu_op_) {
if (UpdateEventIdForBlockingAicpuOp() != SUCCESS) {
GELOGE(FAILED, "[Call][UpdateEventIdForBlockingAicpuOp] Call UpdateEventIdForBlockingAicpuOp failed");
return FAILED;
}
}

// copy task args buf
GE_CHK_STATUS_RET(AllocTensorBuffer(aicpu_ext_handle_.GetExtInfoLen(), ext_info_addr_dev_),
"[Invoke][AllocTensorBuffer]Node[%s] alloc kernel_ext_info buf failed, size=%zu",
@@ -230,6 +244,96 @@ Status AicpuNodeTaskBase::ExecuteAsync(TaskContext &context, std::function<void(
return SUCCESS;
}

/// @brief Create the per-task event for a blocking aicpu op and publish its id into
///        the aicpu ext-info. The event handle is kept in rt_event_ and released in
///        the destructor. No-op when the device lacks the blocking-op feature.
Status AicpuNodeTaskBase::UpdateEventIdForBlockingAicpuOp() {
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("Device not support blocking aicpu op process");
    return SUCCESS;
  }
  auto ret = rtEventCreateWithFlag(&rt_event_, RT_EVENT_WITH_FLAG);
  if (ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtEventCreateWithFlag failed for node:%s, ret:0x%X", node_name_.c_str(),
                      ret);
    GELOGE(RT_FAILED, "[Call][rtEventCreateWithFlag] failed for node:%s, ret:0x%X", node_name_.c_str(), ret);
    return RT_ERROR_TO_GE_STATUS(ret);
  }
  uint32_t event_id = 0;
  ret = rtGetEventID(rt_event_, &event_id);
  if (ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed for node:%s, ret:0x%X", node_name_.c_str(), ret);
    GELOGE(RT_FAILED, "[Call][rtGetEventID] failed for node:%s, ret:0x%X", node_name_.c_str(), ret);
    return RT_ERROR_TO_GE_STATUS(ret);
  }
  if (aicpu_ext_handle_.UpdateEventId(event_id) != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Update event id failed for node:%s.", node_name_.c_str());
    GELOGE(FAILED, "[Update][EventId] Update event id failed for node:%s", node_name_.c_str());
    return FAILED;
  }
  GELOGI("Update event_id=%u success", event_id);
  return SUCCESS;
}

/// @brief Query whether the current device supports the blocking-aicpu-op feature.
/// @param is_support  [out] true iff the device reports RT_AICPU_BLOCKING_OP_SUPPORT
Status AicpuNodeTaskBase::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) {
  int32_t dev_id = 0;
  auto status = rtGetDevice(&dev_id);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  int32_t capability = 0;
  status = rtGetDeviceCapability(dev_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &capability);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  // Only the two documented capability values are acceptable.
  const bool valid = (capability == RT_AICPU_BLOCKING_OP_NOT_SUPPORT) ||
                     (capability == RT_AICPU_BLOCKING_OP_SUPPORT);
  if (!valid) {
    REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d",
                       RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, capability);
    GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d",
           RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, capability);
    return FAILED;
  }
  is_support = (capability == RT_AICPU_BLOCKING_OP_SUPPORT);
  return SUCCESS;
}

/// @brief After launching a blocking aicpu kernel, enqueue a wait on the task's
///        event followed by an event reset on @p stream. Requires rt_event_ to have
///        been created by UpdateEventIdForBlockingAicpuOp(); no-op when the device
///        lacks the blocking-op feature.
Status AicpuNodeTaskBase::DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream) {
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("Device not support blocking aicpu op process.");
    return SUCCESS;
  }
  GELOGD("Distribute queue task begin");
  if (rt_event_ == nullptr) {
    REPORT_INNER_ERROR("E19999", "rt_event_ is nullptr");
    GELOGE(FAILED, "[Check][rt_event_] rt_event_ is nullptr");
    return FAILED;
  }
  // Order matters: wait must be enqueued before the reset on the same stream.
  auto status = rtStreamWaitEvent(stream, rt_event_);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  status = rtEventReset(rt_event_, stream);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  return SUCCESS;
}

Status AicpuTfNodeTask::InitForDependComputeTask() {
if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
@@ -325,6 +429,9 @@ Status AicpuTfNodeTask::Init(const HybridModel &model) {

// init ext info
uint64_t ext_session_id = model.GetSessionId();
const OpDescPtr op_desc = node_item_->GetOpDesc();
AttrUtils::GetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_);
GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_);
GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id), "[Init][ExtInfo] failed for Node[%s].",
node_name_.c_str());
GE_CHK_STATUS_RET(InitForDependComputeTask(), "[Init][DependComputeTask] failed for Node[%s].", node_name_.c_str());
@@ -642,6 +749,12 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) {
kernel_buf_->GetSize(), flag, context.GetStream()));
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[AicpuTfNodertKernelLaunchEx] End");
GELOGD("Node[%s] launch end.", node_name_.c_str());
if (is_blocking_aicpu_op_) {
if (DistributeWaitTaskForAicpuBlockingOp(context.GetStream()) != SUCCESS) {
GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
return FAILED;
}
}
if (need_sync_) {
GELOGD("[%s] Task needs sync", node_name_.c_str());
GE_CHK_STATUS_RET_NOLOG(context.Synchronize());
@@ -760,6 +873,8 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
return FAILED;);

uint64_t ext_session_id = model.GetSessionId();
AttrUtils::GetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_);
GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_);
GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id),
"[Init][ExtInfo] failed for Node[%s].", node_name.c_str());

@@ -826,6 +941,12 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) {
args_.get(), args_size_,
nullptr, context.GetStream(), flag);
GE_CHK_RT_RET(rt_ret);
if (is_blocking_aicpu_op_) {
if (DistributeWaitTaskForAicpuBlockingOp(context.GetStream()) != SUCCESS) {
GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
return FAILED;
}
}
GELOGD("Node[%s] launch task end.", node_name_.c_str());
return SUCCESS;
}


+ 9
- 1
ge/hybrid/node_executor/aicpu/aicpu_node_executor.h View File

@@ -35,7 +35,7 @@ class AicpuNodeTaskBase : public NodeTask {
node_item->num_outputs,
node_item->shape_inference_type) {}

~AicpuNodeTaskBase() override = default;
~AicpuNodeTaskBase() override;

using NodeTask::Init;

@@ -61,6 +61,10 @@ class AicpuNodeTaskBase : public NodeTask {

static Status AllocTensorBuffer(size_t size, std::unique_ptr<TensorBuffer> &tensor_buffer);

Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream);
Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);
Status UpdateEventIdForBlockingAicpuOp();

protected:
const NodeItem *node_item_;
// just reference.
@@ -78,6 +82,10 @@ class AicpuNodeTaskBase : public NodeTask {

// ext info addr, device mem
std::unique_ptr<TensorBuffer> ext_info_addr_dev_;

// for blocking aicpu op
bool is_blocking_aicpu_op_ = false;
rtEvent_t rt_event_ = nullptr;
};

class AicpuTfNodeTask : public AicpuNodeTaskBase {


+ 117
- 0
ge/single_op/task/op_task.cc View File

@@ -564,6 +564,41 @@ AiCpuBaseTask::~AiCpuBaseTask() {
if (ext_info_addr_dev_ != nullptr) {
(void)rtFree(ext_info_addr_dev_);
}
if (rt_event_ != nullptr) {
(void)rtEventDestroy(rt_event_);
}
}

// Creates an rt event (with flag), queries its id and records that id in the
// aicpu ext-info handle so a blocking aicpu op can be waited on later.
// Returns SUCCESS without doing anything when the device does not support
// the blocking aicpu op feature.
Status AiCpuBaseTask::UpdateEventIdForBlockingAicpuOp() {
  bool is_support = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!is_support) {
    GELOGD("Device not support blocking aicpu op process");
    return SUCCESS;
  }
  uint32_t event_id = 0;
  auto rt_ret = rtEventCreateWithFlag(&rt_event_, RT_EVENT_WITH_FLAG);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtEventCreateWithFlag failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][rtEventCreateWithFlag] failed, ret:0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  rt_ret = rtGetEventID(rt_event_, &event_id);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][rtGetEventID] failed, ret:0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  if (aicpu_ext_handle_->UpdateEventId(event_id) != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Update event id=%u failed.", event_id);
    // Fix: the log format previously lacked a %u conversion while still passing
    // event_id as an argument (printf-style format/argument mismatch).
    GELOGE(FAILED, "[Update][EventId] Update event id=%u failed", event_id);
    return FAILED;
  }
  GELOGI("Update event_id=%u success", event_id);
  return SUCCESS;
}

Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint64_t kernel_id) {
@@ -577,6 +612,9 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint
GELOGD("Get unknown_type is %d.", unknown_shape_type_val);
unknown_type_ = static_cast<UnknowShapeOpType>(unknown_shape_type_val);

AttrUtils::GetBool(op_desc_, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_);
GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc_->GetName().c_str(), is_blocking_aicpu_op_);

aicpu_ext_handle_.reset(new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(),
num_inputs_,
num_outputs_,
@@ -595,6 +633,13 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint
GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false),
"[Update][SessionInfo] failed.");

if (is_blocking_aicpu_op_) {
if (UpdateEventIdForBlockingAicpuOp() != SUCCESS) {
GELOGE(FAILED, "[Call][UpdateEventIdForBlockingAicpuOp] Call UpdateEventIdForBlockingAicpuOp failed");
return FAILED;
}
}

GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM));
GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(),
aicpu_ext_handle_->GetExtInfo(), aicpu_ext_handle_->GetExtInfoLen(),
@@ -770,6 +815,63 @@ Status AiCpuBaseTask::UpdateIoAddr(const vector<DataBuffer> &inputs, const vecto
return SUCCESS;
}

// Queries whether the current device's AICPU module supports the blocking
// operator feature. Writes is_support only on SUCCESS; runtime failures and
// out-of-range capability values are reported and returned as errors.
Status AiCpuBaseTask::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) {
int32_t device_id = 0;
auto rt_ret = rtGetDevice(&device_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
int32_t value = 0;
// NOTE(review): the unit-test stub declares the signature as
// rtGetDeviceCapability(device, moduleType, featureType, value), yet the
// feature constant is passed second and the module constant third here --
// confirm the argument order against the real runtime/dev.h declaration.
rt_ret = rtGetDeviceCapability(device_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &value);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
// Any value other than the two documented capability constants is treated as
// an inconsistency rather than silently mapped to "not supported".
if (value != RT_AICPU_BLOCKING_OP_NOT_SUPPORT && value != RT_AICPU_BLOCKING_OP_SUPPORT) {
REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d",
RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value);
GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d",
RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value);
return FAILED;
}
is_support = (value == RT_AICPU_BLOCKING_OP_SUPPORT ? true : false);
return SUCCESS;
}

// Makes the given stream wait on the event created for this blocking aicpu op
// (rt_event_), then resets the event so it can be reused. Degrades to a no-op
// when the device does not support the blocking aicpu op feature.
Status AiCpuBaseTask::DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream) {
  bool support_flag = false;
  if (CheckDeviceSupportBlockingAicpuOpProcess(support_flag) != SUCCESS) {
    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
    return FAILED;
  }
  if (!support_flag) {
    GELOGD("Device not support blocking aicpu op process.");
    return SUCCESS;
  }
  GELOGI("Distribute queue task begin");
  // The event must already have been created by UpdateEventIdForBlockingAicpuOp.
  if (rt_event_ == nullptr) {
    REPORT_INNER_ERROR("E19999", "rt_event_ is nullptr");
    GELOGE(FAILED, "[Check][rt_event_] rt_event_ is nullptr");
    return FAILED;
  }
  rtError_t status = rtStreamWaitEvent(stream, rt_event_);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  status = rtEventReset(rt_event_, stream);
  if (status != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", status);
    GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  return SUCCESS;
}

AiCpuTask::~AiCpuTask() {
FreeHbm(args_);
FreeHbm(io_addr_);
@@ -813,6 +915,14 @@ Status AiCpuTask::LaunchKernel(rtStream_t stream) {
GELOGI("[TASK_INFO] %lu/%s", kernel_id_, op_type_.c_str());

GELOGD("Done launch kernel successfully. task = %s", this->op_type_.c_str());

if (is_blocking_aicpu_op_) {
if (DistributeWaitTaskForAicpuBlockingOp(stream) != SUCCESS) {
GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
return FAILED;
}
}

return SUCCESS;
}

@@ -1089,6 +1199,13 @@ Status AiCpuCCTask::LaunchKernel(rtStream_t stream) {
}
GELOGI("[TASK_INFO] %lu/%s", kernel_id_, op_type_.c_str());
GELOGD("Invoke rtCpuKernelLaunch succeeded");

if (is_blocking_aicpu_op_) {
if (DistributeWaitTaskForAicpuBlockingOp(stream) != SUCCESS) {
GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
return FAILED;
}
}
return SUCCESS;
}



+ 7
- 0
ge/single_op/task/op_task.h View File

@@ -178,6 +178,10 @@ class AiCpuBaseTask : public OpTask {
rtStream_t stream);
Status UpdateOutputShape(vector<GeTensorDesc> &output_desc);
Status UpdateShapeToOutputDesc(const GeShape &shape_new, GeTensorDesc &output_desc);
// for blocking aicpu op
Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream);
Status UpdateEventIdForBlockingAicpuOp();
Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);

protected:
size_t num_inputs_ = 0;
@@ -186,6 +190,9 @@ class AiCpuBaseTask : public OpTask {
std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> aicpu_ext_handle_;
void *ext_info_addr_dev_ = nullptr;
vector<bool> input_is_const_;
// for blocking aicpu op
bool is_blocking_aicpu_op_ = false;
rtEvent_t rt_event_ = nullptr;
};

class AiCpuTask : public AiCpuBaseTask {


+ 82
- 11
tests/depends/runtime/src/runtime_stub.cc View File

@@ -16,12 +16,94 @@

#include <cce/dnn.h>
#include <securec.h>
#include "runtime_stub.h"
#include "runtime/rt.h"

#define ADD_STUB_RETURN_VALUE(FUNC, TYPE) std::vector<TYPE> g_Stub_##FUNC##_RETURN

#define GET_STUB_RETURN_VALUE(FUNC, TYPE, DEFAULT) ({ \
TYPE result = DEFAULT; \
if (!g_Stub_##FUNC##_RETURN.empty()) { \
result = g_Stub_##FUNC##_RETURN.back(); \
g_Stub_##FUNC##_RETURN.pop_back(); \
} \
result; \
})

#define DEL_STUB_RETURN_VALUE(FUNC, TYPE) \
do { \
extern std::vector<TYPE> g_Stub_##FUNC##_RETURN; \
g_Stub_##FUNC##_RETURN.clear(); \
} while (0)


#define ADD_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME) std::vector<TYPE> g_Stub_##FUNC##_OUT_##NAME

#define GET_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME, DEFAULT) ({ \
TYPE value; \
if (!g_Stub_##FUNC##_OUT_##NAME.empty()) { \
value = g_Stub_##FUNC##_OUT_##NAME.back(); \
g_Stub_##FUNC##_OUT_##NAME.pop_back(); \
} else { \
value = DEFAULT; \
} \
value; \
})

#define DEL_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME) \
do { \
extern std::vector<TYPE> g_Stub_##FUNC##_OUT_##NAME; \
g_Stub_##FUNC##_OUT_##NAME.clear(); \
} while (0)

#ifdef __cplusplus
extern "C" {
#endif
#define EVENT_LENTH 10

void rtStubTearDown() {
DEL_STUB_RETURN_VALUE(rtGetDevice, rtError_t);
DEL_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t);
DEL_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t);
DEL_STUB_RETURN_VALUE(rtEventReset, rtError_t);
DEL_STUB_RETURN_VALUE(rtEventCreate, rtError_t);
DEL_STUB_RETURN_VALUE(rtGetEventID, rtError_t);
}

ADD_STUB_RETURN_VALUE(rtGetDevice, rtError_t);
// Fix: runtime_stub.h declares RTS_STUB_OUTBOUND_EXTERN(rtGetDevice, int32_t, device),
// but no matching queue was defined or consulted here; define it and honor
// queued device ids (default 0), mirroring the rtGetDeviceCapability stub.
ADD_STUB_OUTBOUND_VALUE(rtGetDevice, int32_t, device);
rtError_t rtGetDevice(int32_t *device) {
  *device = GET_STUB_OUTBOUND_VALUE(rtGetDevice, int32_t, device, 0);
  return GET_STUB_RETURN_VALUE(rtGetDevice, rtError_t, RT_ERROR_NONE);
}

ADD_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t);
ADD_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value);
// Stub: writes a queued capability value (default RT_AICPU_BLOCKING_OP_SUPPORT)
// into *value and returns a queued rtError_t (default RT_ERROR_NONE).
// moduleType/featureType are ignored by the stub.
rtError_t rtGetDeviceCapability(int32_t device, int32_t moduleType, int32_t featureType, int32_t *value) {
*value = GET_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT);
return GET_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
}

ADD_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t);
// Stub: performs no waiting; returns a queued rtError_t (default RT_ERROR_NONE).
rtError_t rtStreamWaitEvent(rtStream_t stream, rtEvent_t event) {
return GET_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, RT_ERROR_NONE);
}

ADD_STUB_RETURN_VALUE(rtEventReset, rtError_t);
// Stub: performs no reset; returns a queued rtError_t (default RT_ERROR_NONE).
rtError_t rtEventReset(rtEvent_t event, rtStream_t stream) {
return GET_STUB_RETURN_VALUE(rtEventReset, rtError_t, RT_ERROR_NONE);
}

ADD_STUB_RETURN_VALUE(rtEventCreate, rtError_t);
// Stub: hands back a small heap allocation as an opaque event handle and a
// queued rtError_t (default RT_ERROR_NONE). Nothing in this stub frees the
// allocation -- presumably an accepted leak in test scaffolding; confirm if
// leak checkers are ever enabled on these UTs.
rtError_t rtEventCreate(rtEvent_t *event) {
*event = new int[EVENT_LENTH];
return GET_STUB_RETURN_VALUE(rtEventCreate, rtError_t, RT_ERROR_NONE);
}

ADD_STUB_RETURN_VALUE(rtGetEventID, rtError_t);
ADD_STUB_OUTBOUND_VALUE(rtGetEventID, uint32_t, event_id);
// Stub: writes a queued event id (default 0) and returns a queued rtError_t.
rtError_t rtGetEventID(rtEvent_t event, uint32_t *event_id) {
  *event_id = GET_STUB_OUTBOUND_VALUE(rtGetEventID, uint32_t, event_id, 0);
  // Fix: this previously popped the rtEventCreate return queue (copy-paste),
  // so values queued for rtGetEventID were ignored and rtEventCreate's queue
  // was silently consumed by the wrong function.
  return GET_STUB_RETURN_VALUE(rtGetEventID, rtError_t, RT_ERROR_NONE);
}

rtError_t rtCtxSetCurrent(rtContext_t ctx) { return RT_ERROR_NONE; }

rtError_t rtGetStreamId(rtStream_t stream, int32_t *stream_id) {
@@ -42,11 +124,6 @@ rtError_t rtEventGetTimeStamp(uint64_t *time, rtEvent_t event) {
return RT_ERROR_NONE;
}

rtError_t rtEventCreate(rtEvent_t *event) {
*event = new int[EVENT_LENTH];
return RT_ERROR_NONE;
}

rtError_t rtEventCreateWithFlag(rtEvent_t *event, uint32_t flag) {
return rtEventCreate(event);
}
@@ -112,8 +189,6 @@ rtError_t rtMemcpyAsync(void *dst, uint64_t dest_max, const void *src, uint64_t
return RT_ERROR_NONE;
}

rtError_t rtStreamWaitEvent(rtStream_t stream, rtEvent_t event) { return RT_ERROR_NONE; }

rtError_t rtSetTSDevice(uint32_t tsId) {
return RT_ERROR_NONE;
}
@@ -347,10 +422,6 @@ rtError_t rtStreamSwitchEx(void *ptr, rtCondition_t condition, void *value_ptr,

rtError_t rtStreamActive(rtStream_t active_stream, rtStream_t stream) { return RT_ERROR_NONE; }

rtError_t rtEventReset(rtEvent_t event, rtStream_t stream) { return RT_ERROR_NONE; }

rtError_t rtGetDevice(int32_t *device) { return RT_ERROR_NONE; }

rtError_t rtDatadumpInfoLoad(const void *dump_info, uint32_t length) { return RT_ERROR_NONE; }

rtError_t rtKernelLaunchWithFlag(const void *stub_func, uint32_t block_dim, void *args, uint32_t args_size,


+ 70
- 0
tests/depends/runtime/src/runtime_stub.h View File

@@ -0,0 +1,70 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __INC_LLT_RUNTIME_STUB_H
#define __INC_LLT_RUNTIME_STUB_H

#include <vector>

#ifdef __cplusplus
extern "C" {
#endif
void rtStubTearDown();

#define RTS_STUB_SETUP() \
do { \
rtStubTearDown(); \
} while (0)

#define RTS_STUB_TEARDOWN() \
do { \
rtStubTearDown(); \
} while (0)

#define RTS_STUB_RETURN_VALUE(FUNC, TYPE, VALUE) \
do { \
g_Stub_##FUNC##_RETURN.emplace(g_Stub_##FUNC##_RETURN.begin(), VALUE); \
} while (0)

#define RTS_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME, VALUE) \
do { \
g_Stub_##FUNC##_OUT_##NAME.emplace(g_Stub_##FUNC##_OUT_##NAME.begin(), VALUE); \
} while (0)


// Extern declarations for the per-function stub queues defined in
// runtime_stub.cc, so tests can seed them via RTS_STUB_RETURN_VALUE /
// RTS_STUB_OUTBOUND_VALUE.
#define RTS_STUB_RETURN_EXTERN(FUNC, TYPE) extern std::vector<TYPE> g_Stub_##FUNC##_RETURN;
#define RTS_STUB_OUTBOUND_EXTERN(FUNC, TYPE, NAME) extern std::vector<TYPE> g_Stub_##FUNC##_OUT_##NAME;

RTS_STUB_RETURN_EXTERN(rtGetDevice, rtError_t);
RTS_STUB_OUTBOUND_EXTERN(rtGetDevice, int32_t, device)

RTS_STUB_RETURN_EXTERN(rtGetDeviceCapability, rtError_t);
RTS_STUB_OUTBOUND_EXTERN(rtGetDeviceCapability, int32_t, value);

RTS_STUB_RETURN_EXTERN(rtStreamWaitEvent, rtError_t);

RTS_STUB_RETURN_EXTERN(rtEventReset, rtError_t);

RTS_STUB_RETURN_EXTERN(rtEventCreate, rtError_t);
RTS_STUB_OUTBOUND_EXTERN(rtEventCreate, rtEvent_t, event);

RTS_STUB_RETURN_EXTERN(rtGetEventID, rtError_t);
// Fix: this extern previously named rtEventCreate, duplicating the entry above
// and leaving rtGetEventID's outbound event_id queue undeclared.
RTS_STUB_OUTBOUND_EXTERN(rtGetEventID, uint32_t, event_id);

#ifdef __cplusplus
}
#endif
#endif // __INC_LLT_RUNTIME_STUB_H

+ 1
- 0
tests/ut/ge/CMakeLists.txt View File

@@ -935,6 +935,7 @@ target_link_libraries(ge_single_op PRIVATE
ascend_protobuf
json
c_sec
runtime_stub
)

# ut binary


+ 139
- 2
tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc View File

@@ -23,15 +23,20 @@

#include "graph/load/model_manager/task_info/kernel_ex_task_info.h"
#include "cce/aicpu_engine_struct.h"
#include "tests/depends/runtime/src/runtime_stub.h"

namespace ge {
extern OpDescPtr CreateOpDesc(string name, string type);

class UtestKernelExTaskInfo : public testing::Test {
protected:
void SetUp() {}
void SetUp() {
RTS_STUB_SETUP();
}

void TearDown() {}
void TearDown() {
RTS_STUB_TEARDOWN();
}
};

// test kernel_ex_task_Release
@@ -209,4 +214,136 @@ TEST_F(UtestKernelExTaskInfo, parse_topic_type_failed_2) {
KernelExTaskInfo kernel_ex_task_info;
EXPECT_NE(kernel_ex_task_info.InitTaskExtInfo(ext_info, op_desc), SUCCESS);
}

TEST_F(UtestKernelExTaskInfo, blocking_aicpu_op) {
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;

domi::TaskDef task_def;
domi::KernelExDef kernel_ex_def;
kernel_ex_def.set_kernel_ext_info(buf, len);
kernel_ex_def.set_kernel_ext_info_size(len);
domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
*kernel_ex_def_tmp = kernel_ex_def;

const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);

KernelExTaskInfo kernel_ex_task_info;
kernel_ex_task_info.op_desc_ = op_desc;
DavinciModel davinci_model(0, nullptr);
kernel_ex_task_info.davinci_model_ = &davinci_model;
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
EXPECT_EQ(kernel_ex_task_info.Distribute(), SUCCESS);
kernel_ex_task_info.op_desc_ = op_desc;
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
EXPECT_EQ(kernel_ex_task_info.Distribute(), SUCCESS);
}

TEST_F(UtestKernelExTaskInfo, blocking_aicpu_op_fail_01) {
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;

domi::TaskDef task_def;
domi::KernelExDef kernel_ex_def;
kernel_ex_def.set_kernel_ext_info(buf, len);
kernel_ex_def.set_kernel_ext_info_size(len);
domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
*kernel_ex_def_tmp = kernel_ex_def;

const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");

KernelExTaskInfo kernel_ex_task_info;
kernel_ex_task_info.op_desc_ = op_desc;
DavinciModel davinci_model(0, nullptr);
kernel_ex_task_info.davinci_model_ = &davinci_model;
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);

kernel_ex_task_info.is_blocking_aicpu_op_ = true;
EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED);
}

TEST_F(UtestKernelExTaskInfo, blocking_aicpu_op_fail_02) {
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;

domi::TaskDef task_def;
domi::KernelExDef kernel_ex_def;
kernel_ex_def.set_kernel_ext_info(buf, len);
kernel_ex_def.set_kernel_ext_info_size(len);
domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
*kernel_ex_def_tmp = kernel_ex_def;

const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
KernelExTaskInfo kernel_ex_task_info;
kernel_ex_task_info.op_desc_ = op_desc;
DavinciModel davinci_model(0, nullptr);
kernel_ex_task_info.davinci_model_ = &davinci_model;

RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED);

EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED);

EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
EXPECT_EQ(kernel_ex_task_info.Distribute(), SUCCESS);
}

} // namespace ge

+ 138
- 2
tests/ut/ge/graph/load/kernel_task_info_unittest.cc View File

@@ -22,15 +22,20 @@
#include "graph/load/model_manager/davinci_model.h"
#include "graph/load/model_manager/task_info/kernel_task_info.h"
#include "graph/load/model_manager/task_info/hccl_task_info.h"
#include "tests/depends/runtime/src/runtime_stub.h"

namespace ge {
extern OpDescPtr CreateOpDesc(string name, string type);

class UtestKernelTaskInfo : public testing::Test {
protected:
void SetUp() {}
void SetUp() {
RTS_STUB_SETUP();
}

void TearDown() {}
void TearDown() {
RTS_STUB_TEARDOWN();
}
};

// test KernelTaskInfo Init.
@@ -1240,4 +1245,135 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_super_kernel_info) {
EXPECT_EQ(kernel_task_info.SKTFinalize(), SUCCESS);
}

TEST_F(UtestKernelTaskInfo, blocking_aicpu_op) {
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;

domi::TaskDef task_def;
domi::KernelDef kernel_def;
kernel_def.set_kernel_ext_info(buf, len);
kernel_def.set_kernel_ext_info_size(len);

const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
op_desc->SetId(0);
ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
DavinciModel davinci_model(0, nullptr);
davinci_model.op_list_.emplace(0, op_desc);

KernelTaskInfo kernel_task_info;
kernel_task_info.op_desc_ = op_desc;
kernel_task_info.davinci_model_ = &davinci_model;
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
kernel_task_info.op_desc_ = op_desc;
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
}

TEST_F(UtestKernelTaskInfo, blocking_aicpu_op_fail_01) {
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;

domi::KernelDef kernel_def;
kernel_def.set_kernel_ext_info(buf, len);
kernel_def.set_kernel_ext_info_size(len);

const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
op_desc->SetId(0);
DavinciModel davinci_model(0, nullptr);
davinci_model.op_list_.emplace(0, op_desc);

KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &davinci_model;
kernel_task_info.op_desc_ = op_desc;

EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);

kernel_task_info.is_blocking_aicpu_op_ = true;
EXPECT_EQ(kernel_task_info.Distribute(), FAILED);
}

TEST_F(UtestKernelTaskInfo, blocking_aicpu_op_fail_02) {
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;

domi::KernelDef kernel_def;
kernel_def.set_kernel_ext_info(buf, len);
kernel_def.set_kernel_ext_info_size(len);

const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
op_desc->SetId(0);
DavinciModel davinci_model(0, nullptr);
davinci_model.op_list_.emplace(0, op_desc);

KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &davinci_model;
kernel_task_info.op_desc_ = op_desc;

RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
EXPECT_EQ(kernel_task_info.Distribute(), FAILED);

EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
EXPECT_EQ(kernel_task_info.Distribute(), FAILED);

EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
EXPECT_EQ(kernel_task_info.Distribute(), FAILED);

RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
}

} // namespace ge

+ 224
- 3
tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc View File

@@ -27,7 +27,7 @@
#include "hybrid/node_executor/aicpu/aicpu_node_executor.h"
#undef protected
#undef private
#include "tests/depends/runtime/src/runtime_stub.h"
using namespace std;
using namespace testing;
@@ -43,8 +43,12 @@ using namespace hybrid;
class UtestAicpuNodeExecutor : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
void SetUp() {
RTS_STUB_SETUP();
}
void TearDown() {
RTS_STUB_TEARDOWN();
}
};
static NodePtr CreateNode(ComputeGraphPtr graph, const string &name, const string &type, int in_num, int out_num) {
@@ -164,5 +168,222 @@ TEST_F(UtestAicpuNodeExecutor, aicpu_tf_node_task) {
}
TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task) {
ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
GeRootModelPtr ge_root_model = std::make_shared<GeRootModel>(graph);
ge_root_model->SetModelName("test_name");
HybridModel hybrid_model(ge_root_model);
NodePtr node = CreateNode(graph, "deque", FRAMEWORK_OP_TYPE, 1, 1);
ge::AttrUtils::SetBool(node->GetOpDesc(), ATTR_NAME_IS_BLOCKING_OP, true);
std::unique_ptr<NodeItem> new_node;
ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS);
NodeItem *node_item = new_node.get();
node_item->input_start = 0;
node_item->output_start = 0;
node_item->is_dynamic = true;
node_item->shape_inference_type = DEPEND_SHAPE_RANGE;
GraphItem graph_item;
graph_item.node_items_.emplace_back(node_item);
graph_item.total_inputs_ = 1;
graph_item.total_outputs_ = 1;
GraphExecutionContext graph_execution_context;
SubgraphContext subgraph_context(&graph_item, &graph_execution_context);
ASSERT_EQ(subgraph_context.Init(), SUCCESS);
graph_execution_context.callback_manager = std::unique_ptr<CallbackManager>(new CallbackManager());
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);
uint64_t value_0 = 512;
TensorValue in_tensor0(&value_0, sizeof(value_0));
subgraph_context.SetInput(*node_item, 0, in_tensor0);
TensorValue out_tensor0(&value_0, sizeof(value_0));
subgraph_context.SetOutput(*node_item, 0, out_tensor0);
int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
vector<char> aicpu_ext_info(len, 0);
char *buf = aicpu_ext_info.data();
int offset = 0;
hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
offset += sizeof(hybrid::AicpuExtInfo);
hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
async_wait_info->waitType = 0;
async_wait_info->waitId = 0;
async_wait_info->timeOut = 0;
async_wait_info->reserved = 0;
domi::KernelDef kernel_def;
kernel_def.set_kernel_ext_info(buf, len);
kernel_def.set_kernel_ext_info_size(len);
domi::TaskDef task_def;
AicpuTaskStruct args;
args.head.length = sizeof(args);
args.head.ioAddrNum = 2;
kernel_def.set_args(reinterpret_cast<const char *>(&args), args.head.length);
kernel_def.set_args_size(args.head.length);
domi::KernelDef *kernel_def_tmp = task_def.mutable_kernel();
*kernel_def_tmp = kernel_def;
AicpuNodeTask aicpu_node_task(node_item, task_def);
ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
node_item->shape_inference_type = DEPEND_COMPUTE;
domi::KernelExDef kernel_ex_def;
kernel_ex_def.set_kernel_ext_info(buf, len);
kernel_ex_def.set_kernel_ext_info_size(len);
kernel_ex_def.set_args(reinterpret_cast<const char *>(&args), args.head.length);
kernel_ex_def.set_args_size(args.head.length);
domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
*kernel_ex_def_tmp = kernel_ex_def;
hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def, task_def});
AicpuTfNodeTask aicpu_tf_node_task(node_item, task_def);
ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS);
ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
}
// Failure-path coverage for blocking AICPU ops in the hybrid executor.
// Each RTS_STUB_RETURN_VALUE / RTS_STUB_OUTBOUND_VALUE enqueues an override
// that is consumed by a later runtime-API call (presumably one-shot — TODO
// confirm against tests/depends/runtime/src/runtime_stub.h), so the ordering
// of stub calls and ASSERTs below is load-bearing; do not reorder.
TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task_fail) {
  // Minimal hybrid model wrapping a single framework op.
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = std::make_shared<GeRootModel>(graph);
  ge_root_model->SetModelName("test_name");
  HybridModel hybrid_model(ge_root_model);
  // Mark the node as a blocking op so Init()/LaunchTask() take the
  // event-wait code path under test.
  NodePtr node = CreateNode(graph, "deque", FRAMEWORK_OP_TYPE, 1, 1);
  ge::AttrUtils::SetBool(node->GetOpDesc(), ATTR_NAME_IS_BLOCKING_OP, true);
  std::unique_ptr<NodeItem> new_node;
  ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS);
  NodeItem *node_item = new_node.get();
  node_item->input_start = 0;
  node_item->output_start = 0;
  node_item->is_dynamic = true;
  node_item->shape_inference_type = DEPEND_SHAPE_RANGE;
  // Single-node graph item with one input and one output slot.
  GraphItem graph_item;
  graph_item.node_items_.emplace_back(node_item);
  graph_item.total_inputs_ = 1;
  graph_item.total_outputs_ = 1;
  GraphExecutionContext graph_execution_context;
  SubgraphContext subgraph_context(&graph_item, &graph_execution_context);
  ASSERT_EQ(subgraph_context.Init(), SUCCESS);
  graph_execution_context.callback_manager = std::unique_ptr<CallbackManager>(new CallbackManager());
  auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
  ASSERT_NE(node_state, nullptr);
  // Back input/output tensors with a host scalar; the stubbed runtime never
  // dereferences device memory.
  uint64_t value_0 = 512;
  TensorValue in_tensor0(&value_0, sizeof(value_0));
  subgraph_context.SetInput(*node_item, 0, in_tensor0);
  TensorValue out_tensor0(&value_0, sizeof(value_0));
  subgraph_context.SetOutput(*node_item, 0, out_tensor0);
  // Build an ext-info blob containing exactly one FWK_ADPT_EXT_ASYNCWAIT
  // record: an AicpuExtInfo header followed by an AsyncWaitInfo payload.
  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> aicpu_ext_info(len, 0);
  char *buf = aicpu_ext_info.data();
  int offset = 0;
  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  offset += sizeof(hybrid::AicpuExtInfo);
  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
  async_wait_info->waitType = 0;
  async_wait_info->waitId = 0;
  async_wait_info->timeOut = 0;
  async_wait_info->reserved = 0;
  // KernelDef for the AicpuNodeTask (CCE kernel) path.
  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(buf, len);
  kernel_def.set_kernel_ext_info_size(len);
  domi::TaskDef task_def;
  AicpuTaskStruct args;
  args.head.length = sizeof(args);
  args.head.ioAddrNum = 2;  // one input addr + one output addr
  kernel_def.set_args(reinterpret_cast<const char *>(&args), args.head.length);
  kernel_def.set_args_size(args.head.length);
  domi::KernelDef *kernel_def_tmp = task_def.mutable_kernel();
  *kernel_def_tmp = kernel_def;
  AicpuNodeTask aicpu_node_task(node_item, task_def);
  // Init fails when the device id cannot be queried.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED);
  // Init fails when the capability query itself errors (stubbed twice —
  // presumably Init queries the capability at two call sites; TODO confirm).
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED);
  // Init fails when the reported capability value is out of range.
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED);
  // Launch fails when rtGetDevice errors at launch time.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED);
  // With a clean Init, launch fails on rtStreamWaitEvent / rtEventReset errors.
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED);
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED);
  // When the device reports NOT_SUPPORT, Init and Launch both degrade
  // gracefully and succeed (no event wait is attempted).
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
  // Repeat the same matrix for the TF-kernel path (AicpuTfNodeTask), which
  // is selected via DEPEND_COMPUTE and a KernelExDef.
  node_item->shape_inference_type = DEPEND_COMPUTE;
  domi::KernelExDef kernel_ex_def;
  kernel_ex_def.set_kernel_ext_info(buf, len);
  kernel_ex_def.set_kernel_ext_info_size(len);
  kernel_ex_def.set_args(reinterpret_cast<const char *>(&args), args.head.length);
  kernel_ex_def.set_args_size(args.head.length);
  domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
  *kernel_ex_def_tmp = kernel_ex_def;
  // Two task defs: AicpuTfNodeTask::Init additionally consumes a second def
  // — presumably for the DEPEND_COMPUTE result-summary kernel; TODO confirm.
  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def, task_def});
  AicpuTfNodeTask aicpu_tf_node_task(node_item, task_def);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), FAILED);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), FAILED);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
  ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), FAILED);
  ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED);
  ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
}
} // namespace ge

+ 129
- 2
tests/ut/ge/single_op/single_op_task_unittest.cc View File

@@ -19,6 +19,7 @@

#include "graph/load/model_manager/model_utils.h"
#include "graph/utils/graph_utils.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "runtime/rt.h"

#define protected public
@@ -30,6 +31,7 @@
#include "external/register/op_tiling_registry.h"
#undef private
#undef protected
#include "tests/depends/runtime/src/runtime_stub.h"

using namespace std;
using namespace testing;
@@ -38,9 +40,13 @@ using namespace optiling;

class UtestSingleOpTask : public testing::Test {
protected:
void SetUp() {}
void SetUp() {
RTS_STUB_SETUP();
}

void TearDown() {}
void TearDown() {
RTS_STUB_TEARDOWN();
}
};

TEST_F(UtestSingleOpTask, test_build_kernel_task) {
@@ -237,3 +243,124 @@ TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
ASSERT_EQ(ret, PARAM_INVALID);
}
}

// Happy path for a blocking AiCpuCCTask: build a single FWK_ADPT_EXT_ASYNCWAIT
// ext-info record, hand it to the task, and launch on a fresh stream.
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_01) {
  const int ext_len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> ext_holder(ext_len, 0);
  char *ext_buf = ext_holder.data();

  // Header first: record type + payload length.
  auto *header = reinterpret_cast<hybrid::AicpuExtInfo *>(ext_buf);
  header->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  header->infoLen = sizeof(hybrid::AsyncWaitInfo);

  // Payload immediately follows the header; all fields zeroed.
  auto *wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(ext_buf + sizeof(hybrid::AicpuExtInfo));
  wait_info->waitType = 0;
  wait_info->waitId = 0;
  wait_info->timeOut = 0;
  wait_info->reserved = 0;

  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(ext_buf, ext_len);
  kernel_def.set_kernel_ext_info_size(ext_len);

  // Blocking attribute on the op desc drives the event-wait code path.
  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuCCTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);

  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}

// Same happy path as test_blocking_aicpu_op_01, but exercising the TF-kernel
// task class (AiCpuTask) instead of the CCE-kernel task (AiCpuCCTask).
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_02) {
  const int ext_len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> ext_holder(ext_len, 0);
  char *ext_buf = ext_holder.data();

  // Header first: record type + payload length.
  auto *header = reinterpret_cast<hybrid::AicpuExtInfo *>(ext_buf);
  header->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  header->infoLen = sizeof(hybrid::AsyncWaitInfo);

  // Payload immediately follows the header; all fields zeroed.
  auto *wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(ext_buf + sizeof(hybrid::AicpuExtInfo));
  wait_info->waitType = 0;
  wait_info->waitId = 0;
  wait_info->timeOut = 0;
  wait_info->reserved = 0;

  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(ext_buf, ext_len);
  kernel_def.set_kernel_ext_info_size(ext_len);

  // Blocking attribute on the op desc drives the event-wait code path.
  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);

  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}

// Failure matrix for a blocking AiCpuTask in single-op mode. Each RTS_STUB_*
// call queues an override consumed by the next matching runtime-API call
// (presumably one-shot — TODO confirm against runtime_stub.h), so stub/ASSERT
// ordering is load-bearing; do not reorder.
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_fail) {
  // Build an ext-info blob with one FWK_ADPT_EXT_ASYNCWAIT record:
  // AicpuExtInfo header followed by a zeroed AsyncWaitInfo payload.
  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> aicpu_ext_info(len, 0);
  char *buf = aicpu_ext_info.data();
  int offset = 0;
  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  offset += sizeof(hybrid::AicpuExtInfo);
  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
  async_wait_info->waitType = 0;
  async_wait_info->waitId = 0;
  async_wait_info->timeOut = 0;
  async_wait_info->reserved = 0;

  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(buf, len);
  kernel_def.set_kernel_ext_info_size(len);

  // Blocking attribute drives the event-wait code path under test.
  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);

  // Baseline: with no stubbed failures both steps succeed.
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);

  // SetExtInfoAndType fails when the device id cannot be queried.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  // ... or when the capability query errors (stubbed twice — presumably two
  // capability call sites; TODO confirm).
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  // ... or when the reported capability value is out of range.
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  // LaunchKernel fails when rtGetDevice errors at launch time.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);

  // After a clean SetExtInfoAndType, launch fails on rtStreamWaitEvent /
  // rtEventReset errors respectively.
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);

  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);

  // When the device reports NOT_SUPPORT, both steps degrade gracefully and
  // succeed (no event wait is attempted).
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}

+ 16
- 0
third_party/fwkacllib/inc/cce/fwk_adpt_struct.h View File

@@ -62,6 +62,7 @@ enum FWKTaskExtInfoType {
FWK_ADPT_EXT_SESSION_INFO,
FWK_ADPT_EXT_BITMAP,
FWK_ADPT_EXT_TOPIC_TYPE,
FWK_ADPT_EXT_ASYNCWAIT,
FWK_ADPT_EXT_INVALID
};

@@ -80,6 +81,12 @@ enum FWKExtUpdateAddrType {
FWK_ADPT_UPDATE_INPUT_OUTPUT
};

// Wait mode carried in AsyncWait::waitType for blocking AICPU kernels.
enum FWKExtWaitType {
  FWK_ADPT_WAIT_TYPE_NULL = 0,  // no wait requested
  FWK_ADPT_WAIT_TYPE_EVENT,     // wait on a runtime event
  FWK_ADPT_WAIT_TYPE_INVALID    // sentinel / upper bound
};

#pragma pack(push, 1)
// API Parameter Structure
struct StrFWKKernel {
@@ -133,6 +140,15 @@ struct ResultSummary {
uint64_t raw_data_size; // size of raw data
};
#pragma pack(pop)

#pragma pack(push, 1)
// FWK_ADPT_EXT_ASYNCWAIT extension payload: describes how a blocking AICPU
// kernel synchronizes. Packed to 1-byte alignment so the layout matches the
// serialized ext-info blob exchanged with the device.
struct AsyncWait {
  uint8_t waitType;   // wait type, FWK_ADPT_WAIT_TYPE_EVENT: event wait
  uint32_t waitId;    // wait id, GE refresh
  uint32_t timeOut;   // upstream comment says "reserved" — looks like a
                      // timeout field; TODO confirm intended semantics
  uint64_t reserved;  // padding / future use
};
#pragma pack(pop)
} // end namespace FWKAdapter
} // namespace aicpu



+ 8
- 0
third_party/fwkacllib/inc/runtime/config.h View File

@@ -52,6 +52,14 @@ typedef enum tagRtAicpuScheType {
SCHEDULE_HARDWARE, /* HWTS Schedule */
} rtAicpuScheType;

// Capability values reported by the runtime; the RT_AICPU_BLOCKING_OP_*
// entries indicate whether the TS supports AICPU blocking operations.
typedef enum tagRtDeviceCapabilityType {
  RT_SCHEDULE_SOFTWARE = 0,          // SoftWare Schedule
  RT_SCHEDULE_SOFTWARE_OPT,
  RT_SCHEDULE_HARDWARE,              // HWTS Schedule
  RT_AICPU_BLOCKING_OP_NOT_SUPPORT,  // blocking AICPU ops unavailable
  RT_AICPU_BLOCKING_OP_SUPPORT,      // 1910/1980/1951 ts support AICPU blocking operation
} rtDeviceCapabilityType;

typedef enum tagRtVersion {
VER_BEGIN = 0,
VER_NA = VER_BEGIN,


+ 12
- 0
third_party/fwkacllib/inc/runtime/dev.h View File

@@ -65,6 +65,7 @@ typedef enum tagRtFeatureType {

typedef enum tagRtDeviceFeatureType {
FEATURE_TYPE_SCHE,
FEATURE_TYPE_BLOCKING_OPERATOR,
FEATURE_TYPE_END,
} rtDeviceFeatureType_t;

@@ -78,6 +79,17 @@ typedef enum tagMemoryInfo {
MEMORY_INFO_RSV
} rtMemoryInfo_t;

// Device module selector used when querying per-module device information.
// NOTE(review): the typedef name keeps the "tag" prefix
// (tagRtDeviceModuleType_t), unlike sibling typedefs such as
// rtDeviceFeatureType_t — inconsistent, but part of the published header
// interface, so left unchanged.
typedef enum tagRtDeviceModuleType {
  RT_MODULE_TYPE_SYSTEM = 0,    // whole-system scope
  RT_MODULE_TYPE_AICPU,
  RT_MODULE_TYPE_CCPU,
  RT_MODULE_TYPE_DCPU,
  RT_MODULE_TYPE_AICORE,
  RT_MODULE_TYPE_TSCPU,
  RT_MODULE_TYPE_PCIE,
  RT_MODULE_TYPE_VECTOR_CORE
} tagRtDeviceModuleType_t;

/**
* @ingroup dvrt_dev
* @brief get total device number.


Loading…
Cancel
Save