Browse Source

!2045 回退 'Pull Request !2028 : Fix bug of single_op.'

Merge pull request !2045 from 王涛/revert-merge-2028-master
tags/v1.5.1
计晨 Gitee 3 years ago
parent
commit
d2c553f95d
5 changed files with 7 additions and 53 deletions
  1. +1
    -3
      ge/single_op/single_op.cc
  2. +3
    -22
      ge/single_op/task/op_task.cc
  3. +2
    -3
      ge/single_op/task/op_task.h
  4. +1
    -1
      ge/single_op/task/tbe_task_builder.cc
  5. +0
    -24
      tests/ut/ge/single_op/single_op_task_unittest.cc

+ 1
- 3
ge/single_op/single_op.cc View File

@@ -433,13 +433,11 @@ Status DynamicSingleOp::ExecuteAsync(const vector<GeTensorDesc> &input_desc,
   if (!inputs_size.empty()) {
     StreamResource *stream_resource = SingleOpManager::GetInstance().GetResource(resource_id_, stream_);
     GE_CHK_STATUS_RET_NOLOG(UpdateInputsBufferAddr(stream_resource, stream_, inputs_size, update_buffers));
+    GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers));
   }
 
   if (hybrid_model_executor_ != nullptr) {
     GELOGD("Execute multi-task dynamic single op by hybrid model executor");
-    if (!inputs_size.empty()) {
-      GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers));
-    }
     hybrid::HybridModelExecutor::ExecuteArgs args;
     GE_CHK_STATUS_RET_NOLOG(InitHybridModelArgs(update_buffers, output_buffers, input_desc, args));




+ 3
- 22
ge/single_op/task/op_task.cc View File

@@ -293,9 +293,6 @@ Status TbeOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc, cons
 }
 
 Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) {
-  node_ = node;
-  tiling_buffer_ = tiling_buffer;
-  max_tiling_size_ = max_tiling_size;
   if (tiling_buffer != nullptr) {
     uintptr_t *arg_base = nullptr;
     size_t arg_num = 0;
@@ -313,6 +310,9 @@ Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer,
     }
     arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer);
   }
+  node_ = node;
+  tiling_buffer_ = tiling_buffer;
+  max_tiling_size_ = max_tiling_size;
   return SUCCESS;
 }


@@ -481,25 +481,6 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
   }
 }
 
-Status AtomicAddrCleanOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) {
-  node_ = node;
-  tiling_buffer_ = tiling_buffer;
-  max_tiling_size_ = max_tiling_size;
-  if (tiling_buffer != nullptr) {
-    uintptr_t *arg_base = nullptr;
-    size_t arg_num = 0;
-    GetIoAddr(arg_base, arg_num);
-    uint32_t tiling_index = atomic_output_indices_.size();
-    if (arg_num == 0 || arg_num < tiling_index) {
-      GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.",
-             tiling_index, arg_num);
-      return ACL_ERROR_GE_INTERNAL_ERROR;
-    }
-    arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer);
-  }
-  return SUCCESS;
-}
-
 Status AtomicAddrCleanOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc,
                                                 const vector<GeTensorDesc> &output_desc) {
   return SUCCESS;


+ 2
- 3
ge/single_op/task/op_task.h View File

@@ -97,7 +97,7 @@ class TbeOpTask : public OpTask {
   const void *GetArgs() const;
   size_t GetArgSize() const;
   const std::string &GetStubName() const;
-  virtual Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size);
+  Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size);
   const std::string &GetTaskType() const override;
   void SetHandle(void *handle);
 
@@ -149,7 +149,6 @@ class TbeOpTask : public OpTask {
 class AtomicAddrCleanOpTask : public TbeOpTask {
  public:
   Status InitAtomicAddrCleanIndices();
-  Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) override;
 
  private:
   Status UpdateNodeByShape(const vector<GeTensorDesc> &input_desc,
@@ -157,8 +156,8 @@ class AtomicAddrCleanOpTask : public TbeOpTask {
   Status UpdateIoAddr(const vector<DataBuffer> &inputs, const vector<DataBuffer> &outputs) override;
   Status UpdateTilingArgs(rtStream_t stream) override;
   Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info) override;
-
   std::vector<int> atomic_output_indices_;
+
 };


class AiCpuBaseTask : public OpTask { class AiCpuBaseTask : public OpTask {


+ 1
- 1
ge/single_op/task/tbe_task_builder.cc View File

@@ -425,7 +425,7 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) {
     GELOGD("[%s] Done allocating tiling buffer, size=%ld.", op_desc_->GetName().c_str(), max_size);
   }
 
-  GE_CHK_STATUS_RET_NOLOG(task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size)));
+  task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size));
   return SUCCESS;
 }




+ 0
- 24
tests/ut/ge/single_op/single_op_task_unittest.cc View File

@@ -237,27 +237,3 @@ TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
     ASSERT_EQ(ret, PARAM_INVALID);
   }
 }
-
-TEST_F(UtestSingleOpTask, test_dynamic_support) {
-  auto graph = make_shared<ComputeGraph>("graph");
-  auto op_desc = make_shared<OpDesc>("Add", "Add");
-  auto node = graph->AddNode(op_desc);
-  AtomicAddrCleanOpTask atomic_task;
-  TbeOpTask tbe_task;
-
-  tbe_task.arg_size_ = sizeof(void *) * 1;
-  tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]);
-  atomic_task.arg_size_ = sizeof(void *) * 1;
-  atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]);
-  ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR);
-  ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR);
-
-  tbe_task.arg_size_ = sizeof(void *) * 2;
-  tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]);
-  atomic_task.arg_size_ = sizeof(void *) * 2;
-  atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]);
-  ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS);
-  ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS);
-  tbe_task.tiling_buffer_ = nullptr;
-  atomic_task.tiling_buffer_ = nullptr;
-}

Loading…
Cancel
Save