Browse Source

fix sc

tags/v1.3.0
wjm 3 years ago
parent
commit
2abf8be621
4 changed files with 84 additions and 55 deletions
  1. +66
    -53
      ge/graph/preprocess/graph_preprocess.cc
  2. +2
    -1
      ge/graph/preprocess/graph_preprocess.h
  3. +1
    -1
      ge/ir_build/ge_ir_build.cc
  4. +15
    -0
      tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc

+ 66
- 53
ge/graph/preprocess/graph_preprocess.cc View File

@@ -1420,9 +1420,10 @@ Status GraphPrepare::AdjustDataOpOutput(const NodePtr &node) {
return SUCCESS;
}

Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc, bool tune_flag) {
Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc) {
auto format = desc.GetFormat();
auto origin_format = desc.GetOriginFormat();
auto tune_flag = (options_.build_mode == BUILD_MODE_TUNING) && (options_.build_step == BUILD_STEP_AFTER_BUILDER);
bool need_check_internal_format = (!IsTansDataOpData(input_node)) && (!options_.is_single_op) && (!tune_flag);
if (need_check_internal_format) {
bool is_internal = TypeUtils::IsInternalFormat(format) || TypeUtils::IsInternalFormat(origin_format);
@@ -1439,6 +1440,63 @@ Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTens
return SUCCESS;
}

// Validates a user-supplied Data tensor desc against its data type / shape, then
// writes the desc onto input 0 and output 0 of the Data op.
// @param index  position of this input in the user_input vector (used only in messages)
// @param op     op_desc of the Data node to update
// @param desc   user tensor desc; its size field is rewritten to shape_size here
// @return SUCCESS, or FAILED / PARAM_INVALID / a graph status on error
// NOTE(review): in tune mode (BUILD_MODE_TUNING + BUILD_STEP_AFTER_BUILDER) the
// op desc is deliberately left untouched — presumably the tuned graph already
// carries final descs; confirm against the tuning flow.
Status GraphPrepare::UpdateDataInputOutputDesc(GeAttrValue::INT index, OpDescPtr &op, GeTensorDesc &desc) {
  auto data_type = desc.GetDataType();
  uint32_t length = 1;
  bool type_ret = TypeUtils::GetDataTypeLength(data_type, length);
  if (!type_ret) {
    std::string reason = "Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "] of index:" +
                         std::to_string(index) + " input tensor is not support";
    REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
    GELOGE(PARAM_INVALID, "[Check][Param] Input datatype %s is not support.",
           TypeUtils::DataTypeToSerialString(data_type).c_str());
    return FAILED;
  }
  // Expected byte size = element count * element length; overflow-checked multiply.
  int64_t desc_shape = desc.GetShape().GetShapeSize();
  FMK_INT64_UINT32_MULCHECK(desc_shape, length);
  int64_t shape_size = desc_shape * length;
  // A rank-0 (scalar) tensor has GetShapeSize() == 0 but still occupies one element.
  GE_IF_BOOL_EXEC(shape_size == 0 && desc.GetShape().GetDimNum() == 0, shape_size = static_cast<int64_t>(length));
  int64_t size = 0;
  GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(desc, size) != GRAPH_SUCCESS,
                  REPORT_CALL_ERROR("E19999", "Get size of user input tensor failed, index:%ld", index);
                  GELOGE(INTERNAL_ERROR, "[Get][Size] of user input tensor failed, index:%ld", index); return FAILED);
  // size == 0 means the caller did not set a size, which is accepted.
  bool size_check = (size != 0 && shape_size != size);
  if (size_check) {
    // BUG FIX: the original message stringified `size` on both sides of "!=",
    // so the reported values were always identical; the right-hand side must be shape_size.
    std::string reason = "input tensor[index:" + std::to_string(index) + "]'s data size[" + std::to_string(size) +
                         "] != shape_size[" + std::to_string(shape_size) + "], check invalid";
    REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
    GELOGE(PARAM_INVALID, "[Check][Param] input data size = %ld, shape_size = %ld.", size, shape_size);
    return FAILED;
  }
  ge::TensorUtils::SetSize(desc, shape_size);

  auto tune_flag = (options_.build_mode == BUILD_MODE_TUNING) && (options_.build_step == BUILD_STEP_AFTER_BUILDER);
  if (!tune_flag) {
    graphStatus graph_ret = op->UpdateInputDesc(0, desc);
    if (graph_ret != GRAPH_SUCCESS) {
      REPORT_CALL_ERROR("E19999", "Update input desc of op:%s(%s) failed, index:0",
                        op->GetName().c_str(), op->GetType().c_str());
      GELOGE(graph_ret, "[Update][InputDesc] of op:%s(%s) failed, index:0",
             op->GetName().c_str(), op->GetType().c_str());
      return graph_ret;
    }
    // Size will be recalculated in the build stage
    ge::TensorUtils::SetSize(desc, 0);
    graph_ret = op->UpdateOutputDesc(0, desc);
    if (graph_ret != GRAPH_SUCCESS) {
      REPORT_CALL_ERROR("E19999", "Update output desc of op:%s(%s) failed, index:0",
                        op->GetName().c_str(), op->GetType().c_str());
      GELOGE(graph_ret, "[Update][OutputDesc] of op:%s(%s) failed, index:0",
             op->GetName().c_str(), op->GetType().c_str());
      return graph_ret;
    }
  } else {
    GELOGI("data %s skip update info in tune mode", op->GetName().c_str());
  }

  return SUCCESS;
}

Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
const std::map<string, string> &graph_option) {
// Get shape range of input in dynamic_execute mode
@@ -1471,63 +1529,18 @@ Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
}
GeTensorDesc desc(user_input[index].GetTensorDesc());
// data maybe internal format [FRACTAL_NZ] at singleop process such as GEMM.
auto tune_flag = (options_.build_mode == BUILD_MODE_TUNING) && (options_.build_step == BUILD_STEP_AFTER_BUILDER);
ret = CheckInternalFormat(input_node, desc, tune_flag);
ret = CheckInternalFormat(input_node, desc);
if (ret != SUCCESS) {
GELOGE(INTERNAL_ERROR, "[Check][InternalFormat] on %s failed", op->GetName().c_str());
return ret;
}
auto data_type = desc.GetDataType();
uint32_t length = 1;
bool type_ret = TypeUtils::GetDataTypeLength(data_type, length);
if (!type_ret) {
std::string reason = "Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "] of index:" +
std::to_string(index) + " input tensor is not support";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(PARAM_INVALID, "[Check][Param] Input datatype %s is not support.",
TypeUtils::DataTypeToSerialString(data_type).c_str());
return FAILED;
}
int64_t desc_shape = desc.GetShape().GetShapeSize();
FMK_INT64_UINT32_MULCHECK(desc_shape, length);
int64_t shape_size = desc_shape * length;
GE_IF_BOOL_EXEC(shape_size == 0 && desc.GetShape().GetDimNum() == 0, shape_size = static_cast<int64_t>(length));
int64_t size = 0;
GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(desc, size) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Get size of user input tensor failed, index:%ld", index);
GELOGE(INTERNAL_ERROR, "[Get][Size] of user input tensor failed, index:%ld", index);
return FAILED);
bool size_check = (size != 0 && shape_size != size);
if (size_check) {
std::string reason = "input tensor[index:" + std::to_string(index) + "]'s data size[" + std::to_string(size) +
"] != shape_size[" + std::to_string(size) + "], check invalid";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(PARAM_INVALID, "[Check][Param] input data size = %ld, shape_size = %ld.", size, shape_size);
return FAILED;
}
ge::TensorUtils::SetSize(desc, shape_size);
if (!tune_flag) {
graphStatus graph_ret = op->UpdateInputDesc(0, desc);
if (graph_ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update input desc of op:%s(%s) failed, index:0",
op->GetName().c_str(), op->GetType().c_str());
GELOGE(graph_ret, "[Update][InputDesc] of op:%s(%s) failed, index:0",
op->GetName().c_str(), op->GetType().c_str());
return graph_ret;
}
// Size will be recalculated in the build stage
ge::TensorUtils::SetSize(desc, 0);
graph_ret = op->UpdateOutputDesc(0, desc);
if (graph_ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update output desc of op:%s(%s) failed, index:0",
op->GetName().c_str(), op->GetType().c_str());
GELOGE(graph_ret, "[Update][OutputDesc] of op:%s(%s) failed, index:0",
op->GetName().c_str(), op->GetType().c_str());
return graph_ret;
}
} else {
GELOGI("data %s skip update info in tune mode", op->GetName().c_str());

ret = UpdateDataInputOutputDesc(index, op, desc);
if (ret != SUCCESS) {
GELOGE(FAILED, "[Update][DataInputOutputDesc] on %s failed", op->GetName().c_str());
return ret;
}

if (!dynamic_shape_range_vec.empty()) {
ret = UpdateDynamicInputShapeRange(index, dynamic_shape_range_vec, op, desc);
GE_CHK_STATUS_RET(ret, "[Update][DynamicInputShapeRange] on %s failed.", op->GetName().c_str());


+ 2
- 1
ge/graph/preprocess/graph_preprocess.h View File

@@ -63,7 +63,8 @@ class GraphPrepare {
Status CheckRefOp();
Status SetRtContext(rtContext_t rt_context, rtCtxMode_t mode);
Status AdjustDataOpOutput(const NodePtr &node);
Status CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc, bool tune_flag);
Status CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc);
Status UpdateDataInputOutputDesc(GeAttrValue::INT index, OpDescPtr &op, GeTensorDesc &desc);
Status UpdateInput(const std::vector<GeTensor> &user_input, const std::map<string, string> &graph_option);
Status CheckAndUpdateInput(const std::vector<GeTensor> &user_input, const std::map<string, string> &graph_option);
Status CheckConstOp();


+ 1
- 1
ge/ir_build/ge_ir_build.cc View File

@@ -559,8 +559,8 @@ graphStatus Impl::Init(const Graph &graph, const std::map<std::string, std::stri
std::string output_type = GetParam(ge::ir_option::OUTPUT_TYPE);
GE_CHK_BOOL_EXEC(ge::CheckOutputTypeParamValid(output_type) == ge::SUCCESS,
return ge::GRAPH_PARAM_INVALID, "[Check][OutputType] failed!");
// check insert_op_conf

// check insert_op_conf
std::string insert_op_conf = GetParam(ge::ir_option::INSERT_OP_FILE);
GE_CHK_BOOL_EXEC(ge::CheckInsertOpConfParamValid(std::string(insert_op_conf)) == ge::SUCCESS,
return ge::GRAPH_PARAM_INVALID, "[Check][InsertOpConf] failed!");


+ 15
- 0
tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc View File

@@ -179,6 +179,21 @@ TEST_F(UtestGraphPreproces, test_dynamic_input_shape_parse) {
EXPECT_EQ(intput2_result_shape_range.size(), 0);
}

// Verifies that GraphPrepare::UpdateInput rejects a user input tensor whose
// data type is DT_UNDEFINED: GetDataTypeLength fails for it, so UpdateInput
// must return FAILED rather than SUCCESS.
TEST_F(UtestGraphPreproces, test_update_input_fail) {
ge::GraphPrepare graph_prepare;
// BuildGraph1() is a fixture helper defined elsewhere in this test file;
// presumably it contains a Data node matching input index 0 — confirm there.
graph_prepare.compute_graph_ = BuildGraph1();

// Well-formed shape/format, but an unsupported (undefined) data type.
ge::GeTensorDesc tensor1;
tensor1.SetFormat(ge::FORMAT_NCHW);
tensor1.SetShape(ge::GeShape({3, 12, 5, 5}));
tensor1.SetDataType(ge::DT_UNDEFINED);
GeTensor input1(tensor1);
std::vector<GeTensor> user_input = {input1};
std::map<string,string> graph_option;
auto ret = graph_prepare.UpdateInput(user_input, graph_option);
// The unsupported datatype must surface as FAILED from the desc-update path.
EXPECT_EQ(ret, ge::FAILED);
}

TEST_F(UtestGraphPreproces, test_check_user_input) {
ge::GraphPrepare graph_prepare;
graph_prepare.compute_graph_ = BuildGraph1();


Loading…
Cancel
Save