
!586 sync ge_dev to master 20220629

Merge pull request !586 from 唐豪杰/ge_dev
pull/591/MERGE
zhangfan (Gitee), 3 years ago
commit b6e592d1dc
GPG signature: no known key found for this signature in database (GPG Key ID: 173E9B9CA92EEF8F)
16 changed files with 60 additions and 61 deletions
  1. parser/caffe/caffe_parser.cc (+8, -7)
  2. parser/caffe/caffe_parser.h (+8, -9)
  3. parser/common/acl_graph_parser_util.cc (+6, -6)
  4. parser/common/acl_graph_parser_util.h (+4, -3)
  5. parser/common/model_saver.cc (+1, -1)
  6. parser/common/parser_fp16_t.cc (+20, -20)
  7. parser/common/pass.h (+0, -2)
  8. parser/common/pre_checker.cc (+3, -3)
  9. parser/common/proto_file_parser.cc (+1, -1)
  10. parser/common/register_tbe.cc (+1, -1)
  11. parser/common/register_tbe.h (+1, -1)
  12. parser/onnx/subgraph_adapter/if_subgraph_adapter.cc (+1, -1)
  13. parser/onnx/subgraph_adapter/if_subgraph_adapter.h (+1, -1)
  14. parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc (+1, -1)
  15. parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h (+1, -1)
  16. parser/tensorflow/tensorflow_parser.cc (+3, -3)

parser/caffe/caffe_parser.cc (+8, -7)

@@ -74,6 +74,7 @@ using std::ifstream;

namespace {
const size_t kMaxErrStrLen = 128U;
+ std::map<std::vector<std::string>, std::vector<std::string>> params_share_map;
} // namespace

namespace ge {
@@ -282,7 +283,7 @@ Status CheckPathValid(const char *model_path, const string &custom_proto, string
const set<string> CaffeWeightsParser::skiped_layer_type_ = {"Split", "SoftmaxWithLoss", "Accuracy", "Data",
"Dropout", "MultiLabelLMDB", "Python", "AnnotatedData"};

- Status CaffeModelParser::ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag) {
+ Status CaffeModelParser::ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag) const {
if (proto_message.input_size() > 0) {
GELOGI("This net exsit input.");

@@ -456,7 +457,7 @@ Status CaffeModelParser::CustomProtoParse(const char *model_path, const string &
return ret;
}

- Status CaffeModelParser::ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message) {
+ Status CaffeModelParser::ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message) const {
int32_t copy_fd = mmDup(STDERR_FILENO);
if (copy_fd < 0) {
char_t err_buf[kMaxErrStrLen + 1U] = {};
@@ -536,7 +537,7 @@ Status CaffeModelParser::ReadCaffeModelFromText(const char *model_path, google::

Status CaffeModelParser::ParseLayerParameter(const google::protobuf::Descriptor *layer_descriptor,
const google::protobuf::Message *message,
- vector<ge::Operator> &operators) {
+ vector<ge::Operator> &operators) const {
auto field_name = layer_descriptor->FindFieldByName(kFieldName);
CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(field_name, "Does not find name in google::protobuf::Descriptor");
auto field_type = layer_descriptor->FindFieldByName(kFieldType);
@@ -624,7 +625,7 @@ void CaffeModelParser::AddOutputInfoToContext(string layer_name, int32_t top_ind
ge::GetParserContext().user_out_nodes.push_back(std::make_pair(layer_name, top_index));
}

- Status CaffeModelParser::ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message) {
+ Status CaffeModelParser::ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message) const {
if (ge::GetParserContext().user_out_tensors.empty()) {
return SUCCESS;
}
@@ -932,7 +933,7 @@ Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const dom
}

Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
- const string &op_type) {
+ const string &op_type) const {
if (std::find(kAddTensorIrSkipNodes.begin(), kAddTensorIrSkipNodes.end(), op_type) != kAddTensorIrSkipNodes.end()) {
op_desc = ge::parser::MakeShared<ge::OpDesc>(layer.name(), op_type);
GE_CHECK_NOTNULL(op_desc);
@@ -1202,7 +1203,7 @@ std::string CaffeModelParser::RemapTopNameByLayer(const domi::caffe::LayerParame
return (top_name + "_" + layer.name() + "_" + std::to_string(index));
}

- Status CaffeModelParser::PreCheck(const domi::caffe::NetParameter &net) {
+ Status CaffeModelParser::PreCheck(const domi::caffe::NetParameter &net) const {
// Add layer in the model to PreChecker and check the general parameters
PreChecker::Instance().SetModelName(net.name());
for (int i = 0; i < net.layer_size(); i++) {
@@ -1977,7 +1978,7 @@ Status CaffeWeightsParser::ParseLayerField(const google::protobuf::Reflection *r
}

Status CaffeWeightsParser::ConvertBlobsProto(const google::protobuf::Message *message,
- google::protobuf::Message *blobs) {
+ google::protobuf::Message *blobs) const {
const google::protobuf::Reflection *blobs_reflection = message->GetReflection();
CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(blobs_reflection, "Get Reflection failed in google::protobuf::Message");
vector<const google::protobuf::FieldDescriptor *> field_desc;
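
Note on the recurring change in this file: every modified signature above simply appends `const` to a member function that does not mutate the parser's state. A minimal sketch of the pattern, using a hypothetical class rather than anything from this repository:

```cpp
#include <string>

// Hypothetical example: a parser-like class whose read-only helpers are marked
// const so they can be called on const instances and cannot mutate members.
class ConfigParser {
 public:
  explicit ConfigParser(std::string text) : text_(std::move(text)) {}

  // Read-only check: marked const because it never changes text_.
  bool IsEmpty() const { return text_.empty(); }

  // Mutating operation: left non-const.
  void Append(const std::string &line) { text_ += "\n" + line; }

 private:
  std::string text_;
};

int main() {
  const ConfigParser parser("layer { name: \"conv1\" }");
  return parser.IsEmpty() ? 1 : 0;  // OK: IsEmpty() is const-qualified
  // parser.Append("x");            // would not compile: Append() is not const
}
```

The qualifier has to appear on both the definition and the declaration, which is why each function shows up once here and once more in caffe_parser.h below.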


parser/caffe/caffe_parser.h (+8, -9)

@@ -52,12 +52,11 @@ using std::string;
using std::unordered_map;
using std::vector;
using domi::Status;
- static std::map<std::vector<std::string>, std::vector<std::string>> params_share_map;

class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
public:
CaffeModelParser() {}
- virtual ~CaffeModelParser() override {}
+ ~CaffeModelParser() override {}

/**
* @ingroup domi_omg
@@ -145,7 +144,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return SUCCESS build successfully
* @return FAILED build failed
*/
- Status PreCheck(const domi::caffe::NetParameter &net);
+ Status PreCheck(const domi::caffe::NetParameter &net) const;

/**
* @ingroup domi_omg
@@ -156,7 +155,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return SUCCESS build successfully
* @return FAILED build failed
*/
- Status ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag);
+ Status ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag) const;

/*
* @ingroup domi_omg
@@ -192,7 +191,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return SUCCESS read file successfully
* @return FAILED read file failed
*/
- Status ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message);
+ Status ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message) const;

/*
* @ingroup domi_omg
@@ -214,7 +213,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return FAILED parse layer failed
*/
Status ParseLayerParameter(const google::protobuf::Descriptor *layer_descriptor,
- const google::protobuf::Message *message, std::vector<ge::Operator> &operators);
+ const google::protobuf::Message *message, std::vector<ge::Operator> &operators) const;

/*
* @ingroup domi_omg
@@ -301,7 +300,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) const;

Status AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
- const string &op_type);
+ const string &op_type) const;

Status AddUserOutNodesTop();

@@ -321,7 +320,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {

void AddOutputInfoToContext(string layer_name, int32_t top_index) const;

- Status ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message);
+ Status ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message) const;

Status SaveDataLayerTops(const domi::caffe::LayerParameter &layer);

@@ -405,7 +404,7 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser {
google::protobuf::Message *layer);

Status ConvertBlobsProto(const google::protobuf::Message *message,
- google::protobuf::Message *blobs);
+ google::protobuf::Message *blobs) const;

Status ConvertBlobShapeProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message) const;
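
The other change in this header removes `static std::map<...> params_share_map;` and re-creates it inside the unnamed namespace of caffe_parser.cc (first hunk above). A namespace-scope `static` defined in a header gives every translation unit that includes the header its own private copy of the map, so state written from one .cc is invisible to another and unused copies trigger warnings; defining it once in the .cc keeps a single instance with internal linkage. A minimal sketch with hypothetical file and symbol names:

```cpp
// shared_state.cc (hypothetical) -- the header no longer defines the variable.
#include <cstddef>
#include <map>
#include <string>

namespace {
// One definition with internal linkage, visible only to this translation unit.
std::map<std::string, int> g_counts;
}  // namespace

void Record(const std::string &key) { ++g_counts[key]; }
std::size_t CountEntries() { return g_counts.size(); }
```

Since params_share_map appears to be used only from caffe_parser.cc, nothing else needs to see it, and the unnamed namespace makes that explicit.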


parser/common/acl_graph_parser_util.cc (+6, -6)

@@ -268,7 +268,7 @@ void AclGrphParseUtil::SetDefaultFormat() {
}
}

- domi::Status AclGrphParseUtil::ParseAclOutputNodes(const string &out_nodes) {
+ domi::Status AclGrphParseUtil::ParseAclOutputNodes(const string &out_nodes) const {
try {
ge::GetParserContext().out_nodes_map.clear();
ge::GetParserContext().user_out_nodes.clear();
@@ -494,7 +494,7 @@ domi::Status AclGrphParseUtil::GetOutputLeaf(NodePtr node,
}

domi::Status AclGrphParseUtil::GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
- std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) {
+ std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) const {
std::vector<std::pair<std::string, int32_t>> default_out_nodes = ge::GetParserContext().default_out_nodes;
if (!default_out_nodes.empty()) {
for (size_t i = 0; i < default_out_nodes.size(); ++i) {
@@ -589,7 +589,7 @@ domi::Status AclGrphParseUtil::CheckOptions(const std::map<AscendString, AscendS
}

string key_str = key_ascend;
- auto it = ge::ir_option::ir_parser_suppported_options.find(key_str);
+ std::set<std::string>::const_iterator it = ge::ir_option::ir_parser_suppported_options.find(key_str);
if (it == ge::ir_option::ir_parser_suppported_options.end()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10016", {"parameter", "opname"}, {"parser_params", key_str});
GELOGE(PARAM_INVALID, "[Check][Param] Input options include unsupported option(%s).Please check!", key_ascend);
@@ -654,7 +654,7 @@ domi::Status AclGrphParseUtil::ParseParamsBeforeGraph(const std::map<AscendStrin
}

domi::Status AclGrphParseUtil::ParseParamsAfterGraph(ge::Graph &graph,
- const std::map<AscendString, AscendString> &parser_params) {
+ const std::map<AscendString, AscendString> &parser_params) const {
// support paragrams: input_fp16_nodes, is_input_adjust_hw_layout,
ComputeGraphPtr compute_graph = GraphUtils::GetComputeGraph(graph);
GE_CHECK_NOTNULL(compute_graph);
@@ -946,7 +946,7 @@ FMK_FUNC_HOST_VISIBILITY bool ValidateStr(const std::string &filePath, const std
regex_t reg;
int cflags = REG_EXTENDED | REG_NOSUB;
int ret = regcomp(&reg, mode.c_str(), cflags);
- if (ret) {
+ if (ret != 0) {
regerror(ret, &reg, ebuff, kMaxBuffSize);
GELOGW("regcomp failed, reason: %s", ebuff);
regfree(&reg);
@@ -954,7 +954,7 @@ FMK_FUNC_HOST_VISIBILITY bool ValidateStr(const std::string &filePath, const std
}

ret = regexec(&reg, filePath.c_str(), 0, nullptr, 0);
- if (ret) {
+ if (ret != 0) {
regerror(ret, &reg, ebuff, kMaxBuffSize);
GELOGE(ge::PARAM_INVALID, "[Invoke][RegExec] failed, reason: %s", ebuff);
regfree(&reg);
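
regcomp() and regexec() return an int status code, so the checks here change from `if (ret)` to `if (ret != 0)`: the behavior is identical, but the explicit comparison avoids treating an integer as a boolean (a common coding-standard requirement). A self-contained POSIX sketch of the same checking pattern, using a hypothetical helper rather than this repository's ValidateStr:

```cpp
#include <regex.h>
#include <cstdio>

// Returns true if `path` matches `pattern` (POSIX extended syntax).
bool MatchesPattern(const char *path, const char *pattern) {
  regex_t reg;
  // regcomp returns 0 on success and a non-zero error code on failure.
  int ret = regcomp(&reg, pattern, REG_EXTENDED | REG_NOSUB);
  if (ret != 0) {  // explicit comparison instead of `if (ret)`
    char err[128] = {};
    regerror(ret, &reg, err, sizeof(err));
    std::fprintf(stderr, "regcomp failed: %s\n", err);
    return false;
  }
  ret = regexec(&reg, path, 0, nullptr, 0);
  regfree(&reg);
  return ret == 0;  // 0 means the pattern matched
}
```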


parser/common/acl_graph_parser_util.h (+4, -3)

@@ -44,7 +44,8 @@ class AclGrphParseUtil {
domi::Status SetOutputNodeInfo(ge::Graph &graph, const std::map<AscendString, AscendString> &parser_params);
domi::Status ParseParamsBeforeGraph(const std::map<AscendString, AscendString> &parser_params,
std::string &graph_name);
- domi::Status ParseParamsAfterGraph(ge::Graph &graph, const std::map<AscendString, AscendString> &parser_params);
+ domi::Status ParseParamsAfterGraph(ge::Graph &graph, const std::map<AscendString,
+ AscendString> &parser_params) const;

private:
bool parser_initialized = false;
@@ -53,7 +54,7 @@ class AclGrphParseUtil {
void CreateOutputNodesInfo(std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info,
std::vector<std::string> &output_nodes_name) const;
static void SetDefaultFormat();
- domi::Status ParseAclOutputNodes(const std::string &out_nodes);
+ domi::Status ParseAclOutputNodes(const std::string &out_nodes) const;
domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16) const;
domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes) const;
static void AddAttrsForInputNodes(const vector<string> &adjust_fp16_format_vec, const string &fp16_nodes_name,
@@ -61,7 +62,7 @@ class AclGrphParseUtil {
domi::Status ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes,
const string &is_input_adjust_hw_layout) const;
domi::Status GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
- std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info);
+ std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) const;
};

namespace parser {


parser/common/model_saver.cc (+1, -1)

@@ -77,7 +77,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
const char *model_char = model_str.c_str();
uint32_t len = static_cast<uint32_t>(model_str.length());
// Write data to file
- mmSsize_t mmpa_ret = mmWrite(fd, const_cast<void *>((const void *)model_char), len);
+ mmSsize_t mmpa_ret = mmWrite(fd, const_cast<void *>(static_cast<const void *>(model_char)), len);
if (mmpa_ret == EN_ERROR || mmpa_ret == EN_INVALID_PARAM) {
char_t err_buf[kMaxErrStrLen + 1U] = {};
const auto err_msg = mmGetErrorFormatMessage(mmGetErrorCode(), &err_buf[0], kMaxErrStrLen);
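
The single change here replaces the C-style `(const void *)` cast with `static_cast<const void *>`, leaving the const removal to the existing `const_cast`. Named casts keep each conversion visible and searchable. A hedged sketch of the same two-step cast, written against plain POSIX write() instead of the mmWrite() wrapper:

```cpp
#include <unistd.h>
#include <string>

// Hypothetical helper: write a serialized model string to a file descriptor.
ssize_t WriteString(int fd, const std::string &model_str) {
  const char *model_char = model_str.c_str();
  const auto len = static_cast<size_t>(model_str.length());
  // Two explicit named casts instead of one C-style cast: static_cast performs
  // the pointer conversion, const_cast removes constness (only needed because
  // some legacy write APIs, such as mmWrite, take a non-const void *).
  void *buf = const_cast<void *>(static_cast<const void *>(model_char));
  return write(fd, buf, len);
}
```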


parser/common/parser_fp16_t.cc (+20, -20)

@@ -48,7 +48,7 @@ static bool IsRoundOne(uint64_t man, uint16_t trunc_len) {
uint64_t mask0 = 0x4;
uint64_t mask1 = 0x2;
uint64_t mask2;
- uint16_t shift_out = static_cast<uint16_t>(trunc_len - kDim2);
+ uint16_t shift_out = static_cast<uint16_t>(trunc_len - static_cast<uint16_t>(kDim2));
mask0 = mask0 << shift_out;
mask1 = mask1 << shift_out;
mask2 = mask1 - 1;
@@ -89,7 +89,7 @@ static float Fp16ToFloat(const uint16_t &fp_val) {
int16_t hf_exp;
ExtractFp16(fp_val, hf_sign, hf_exp, hf_man);

- while (hf_man && !(hf_man & kFp16ManHideBit)) {
+ while ((hf_man != 0U) && ((hf_man & kFp16ManHideBit) == 0U)) {
hf_man <<= 1;
hf_exp--;
}
@@ -120,7 +120,7 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
int16_t hf_exp;
ExtractFp16(fp_val, hf_sign, hf_exp, hf_man);

- while (hf_man && !(hf_man & kFp16ManHideBit)) {
+ while ((hf_man != 0U) && ((hf_man & kFp16ManHideBit) == 0U)) {
hf_man <<= 1;
hf_exp--;
}
@@ -128,7 +128,7 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
uint64_t e_ret;
uint64_t m_ret;
uint64_t s_ret = hf_sign;
- if (!hf_man) {
+ if (hf_man == 0U) {
e_ret = 0;
m_ret = 0;
} else {
@@ -256,7 +256,7 @@ static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
shift_out++;
}
}
- if (!overflow_flag) {
+ if (overflow_flag == 0U) {
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
m_ret = static_cast<uint8_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen8Max);
if (need_round && m_ret != kBitLen8Max) {
@@ -290,7 +290,7 @@ static uint16_t GetUint16ValByMan(uint16_t s_ret, const uint64_t &long_int_m, co
if (m_ret == 0) {
s_ret = 0;
}
- return static_cast<uint16_t>((s_ret << kBitShift15) | (m_ret));
+ return static_cast<uint16_t>((s_ret << static_cast<uint16_t>(kBitShift15)) | (m_ret));
}

/// @ingroup fp16_t math conversion static method
@@ -431,7 +431,7 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
s_ret = 0;
}
// Generate final result
- ret_v = (s_ret << kBitShift31) | (m_ret);
+ ret_v = (s_ret << static_cast<uint16_t>(kBitShift31)) | (m_ret);
}

return *(ge::PtrToPtr<uint32_t, uint32_t>(&ret_v));
@@ -565,7 +565,7 @@ static uint16_t Fp16Add(uint16_t v_1, uint16_t v_2) {
m_trunc = (m_b << (static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
m_b = RightShift(m_b, e_tmp);
} else if (e_a < e_b) {
- m_trunc = (m_a << (kBitShift32 - static_cast<uint16_t>(e_tmp)));
+ m_trunc = (m_a << (static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
m_a = RightShift(m_a, e_tmp);
}
// calculate mantissav
@@ -603,7 +603,7 @@ static uint16_t Fp16Mul(uint16_t v_1, uint16_t v_2) {
m_a = m_a_tmp;
m_b = m_b_tmp;

- e_ret = ((e_a + e_b) - kFp16ExpBias) - kDim10;
+ e_ret = ((e_a + e_b) - kFp16ExpBias) - static_cast<int16_t>(kDim10);
mul_m = m_a * m_b;
s_ret = s_a ^ s_b;

@@ -905,7 +905,7 @@ fp16_t &fp16_t::operator=(const float &f_val) {
fp16_t &fp16_t::operator=(const int8_t &i_val) {
uint16_t s_ret, e_ret, m_ret;

- s_ret = static_cast<uint16_t>(((static_cast<uint8_t>(i_val)) & 0x80) >> kDim7);
+ s_ret = static_cast<uint16_t>(((static_cast<uint8_t>(i_val)) & 0x80) >> static_cast<uint8_t>(kDim7));
m_ret = static_cast<uint16_t>(((static_cast<uint8_t>(i_val)) & kInt8Max));

if (m_ret == 0) {
@@ -952,14 +952,14 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u
uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
if (static_cast<bool>(m_tmp)) {
int16_t e_ret;
- if (len > kDim11) {
+ if (len > static_cast<uint16_t>(kDim11)) {
e_ret = kFp16ExpBias + kFp16ManLen;
uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
uint32_t trunc_mask = 1;
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
- uint32_t m_trunc = (m_tmp & trunc_mask) << (kBitShift32 - e_tmp);
+ uint32_t m_trunc = (m_tmp & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
for (int i = 0; i < e_tmp; i++) {
m_tmp = (m_tmp >> 1);
e_ret = e_ret + 1;
@@ -991,7 +991,7 @@ fp16_t &fp16_t::operator=(const int16_t &i_val) {
val = 0;
} else {
uint16_t ui_val = *(ge::PtrToPtr<const int16_t, const int16_t>(&i_val));
- auto s_ret = static_cast<uint16_t>(ui_val >> kBitShift15);
+ auto s_ret = static_cast<uint16_t>(ui_val >> static_cast<uint16_t>(kBitShift15));
if (static_cast<bool>(s_ret)) {
int16_t iValM = -i_val;
ui_val = *(ge::PtrToPtr<int16_t, uint16_t>(&iValM));
@@ -1018,7 +1018,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
- m_trunc = (m_ret & trunc_mask) << (kBitShift32 - e_tmp);
+ m_trunc = (m_ret & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
for (int i = 0; i < e_tmp; i++) {
m_ret = (m_ret >> 1);
e_ret = e_ret + 1;
@@ -1040,7 +1040,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
- m_ret = m_ret << (kDim11 - len);
+ m_ret = m_ret << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
}
val = FP16_CONSTRUCTOR(0u, static_cast<uint16_t>(e_ret), m_ret);
@@ -1062,7 +1062,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
- m_trunc = (m_tmp & trunc_mask) << (kBitShift32 - e_tmp);
+ m_trunc = (m_tmp & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
for (int i = 0; i < e_tmp; i++) {
m_tmp = (m_tmp >> 1);
e_ret = e_ret + 1;
@@ -1085,7 +1085,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
- m_tmp = m_tmp << (kDim11 - len);
+ m_tmp = m_tmp << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
@@ -1097,7 +1097,7 @@ fp16_t &fp16_t::operator=(const int32_t &i_val) {
val = 0;
} else {
uint32_t ui_val = *(ge::PtrToPtr<const int32_t, const uint32_t>(&i_val));
- auto s_ret = static_cast<uint16_t>(ui_val >> kBitShift31);
+ auto s_ret = static_cast<uint16_t>(ui_val >> static_cast<uint16_t>(kBitShift31));
if (static_cast<bool>(s_ret)) {
int32_t iValM = -i_val;
ui_val = *(ge::PtrToPtr<int32_t, uint32_t>(&iValM));
@@ -1124,7 +1124,7 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
- m_trunc = (m_tmp & trunc_mask) << static_cast<uint32_t>(kBitShift32 - e_tmp);
+ m_trunc = (m_tmp & trunc_mask) << static_cast<uint32_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
for (uint16_t i = 0; i < e_tmp; i++) {
m_tmp = (m_tmp >> 1);
e_ret = e_ret + 1;
@@ -1147,7 +1147,7 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
- m_tmp = m_tmp << (kDim11 - len);
+ m_tmp = m_tmp << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
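
All twenty changes in this file follow one theme: operands narrower than int (uint16_t, uint8_t and the kDim*/kBitShift* constants) are implicitly promoted before `<<`, `>>`, `-` and `*`, so the merge pins the intended operand type with an explicit static_cast, in the style MISRA-like rules require. A hedged sketch of the idea using a hypothetical helper (the constant name is borrowed from the diff, its value assumed):

```cpp
#include <cstdint>

constexpr uint32_t kBitShift32 = 32U;  // assumed value, mirrors the diff's kBitShift32

// Build the truncated low bits of `mantissa`, shifted to the top of a 32-bit
// word. e_tmp is assumed to be in [1, 31].
uint32_t TruncBits(uint32_t mantissa, uint16_t e_tmp) {
  const uint32_t trunc_mask = (1U << e_tmp) - 1U;
  // Without the cast, `kBitShift32 - e_tmp` mixes uint32_t with an int-promoted
  // uint16_t; the cast makes the whole subtraction one explicit unsigned type.
  return (mantissa & trunc_mask) << (kBitShift32 - static_cast<uint32_t>(e_tmp));
}
```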


parser/common/pass.h (+0, -2)

@@ -19,8 +19,6 @@

#include <memory>

#include "common/fmk_error_codes.h"

namespace ge {
///
/// @ingroup domi_omg


parser/common/pre_checker.cc (+3, -3)

@@ -218,9 +218,9 @@ Status PreChecker::Save(const string &file) {

// Constructing JSON information of operators in order of network
for (auto id : ops_) {
- auto iter = op_map_.find(id);
- GE_CHK_BOOL_RET_STATUS(iter != op_map_.end(), FAILED, "[Check][Param] don't find this op.");
- Info &info = iter->second;
+ std::map<OpId, Info>::const_iterator iter = op_map_.find(id);
+ GE_CHK_BOOL_RET_STATUS(iter != op_map_.cend(), FAILED, "[Check][Param] don't find this op.");
+ const Info &info = iter->second;

// Initialization operator general information
nlohmann::json op = {{kKeyOpName, info.name}, {kKeyOpType, info.type}};
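
The lookup now spells out the iterator type, compares against cend(), and binds the entry as `const Info &`, so the save loop is read-only by construction. A minimal sketch with hypothetical stand-in types:

```cpp
#include <map>
#include <string>

struct Info { std::string name; std::string type; };  // stand-in for the checker's Info

bool HasCompleteInfo(const std::map<int, Info> &op_map, int id) {
  // Explicit const_iterator + cend() instead of `auto iter` and end().
  std::map<int, Info>::const_iterator iter = op_map.find(id);
  if (iter == op_map.cend()) {
    return false;  // op not found
  }
  const Info &info = iter->second;  // const reference: the entry cannot be modified
  return !info.name.empty() && !info.type.empty();
}
```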


parser/common/proto_file_parser.cc (+1, -1)

@@ -67,7 +67,7 @@ bool GetIdentifier(const std::string &line, int &identifier) {
break;
}
if (line[i] >= kMinNum && line[i] <= kMaxNum) {
- identifier = identifier * kDecimalMulti + line[i] - kMinNum;
+ identifier = identifier * kDecimalMulti + static_cast<int>(line[i]) - static_cast<int>(kMinNum);
}
if (identifier > kMaxIdentifier || identifier < 0) {
return false;
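
line[i] is a char, and subtracting kMinNum (presumably the character '0') already happens after implicit promotion to int; the change just makes that promotion explicit. A hedged, simplified sketch of the digit-accumulation step (constants assumed, the original overflow check omitted):

```cpp
#include <string>

// Hypothetical parser for a decimal identifier at the start of `line`.
bool ParseIdentifier(const std::string &line, int &identifier) {
  constexpr char kMinNum = '0';     // assumption: matches the diff's kMinNum
  constexpr char kMaxNum = '9';     // assumption: matches the diff's kMaxNum
  constexpr int kDecimalMulti = 10;
  identifier = 0;
  for (const char c : line) {
    if (c < kMinNum || c > kMaxNum) {
      break;  // stop at the first non-digit character
    }
    // Explicit char -> int casts instead of relying on implicit promotion.
    identifier = identifier * kDecimalMulti + (static_cast<int>(c) - static_cast<int>(kMinNum));
  }
  return identifier > 0;
}
```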


parser/common/register_tbe.cc (+1, -1)

@@ -75,7 +75,7 @@ bool OpRegistrationTbe::Finalize(const OpRegistrationData &reg_data, bool is_tra
return ret;
}

- bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) {
+ bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) const {
if (reg_data.GetFrameworkType() == domi::TENSORFLOW) {
std::shared_ptr<OpParserFactory> factory = OpParserFactory::Instance(domi::TENSORFLOW);
if (factory == nullptr) {


parser/common/register_tbe.h (+1, -1)

@@ -27,7 +27,7 @@ class OpRegistrationTbe {
bool Finalize(const OpRegistrationData &reg_data, bool is_train = false);

private:
- bool RegisterParser(const OpRegistrationData &reg_data);
+ bool RegisterParser(const OpRegistrationData &reg_data) const;
};
} // namespace ge


parser/onnx/subgraph_adapter/if_subgraph_adapter.cc (+1, -1)

@@ -45,7 +45,7 @@ domi::Status IfSubgraphAdapter::AdaptAndFindAllSubgraphs(

domi::Status IfSubgraphAdapter::ParseIfNodeSubgraphs(
ge::onnx::NodeProto *parent_node, std::vector<ge::onnx::GraphProto *> &onnx_graphs,
- std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph, const std::string &parent_graph_name) {
+ std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph, const std::string &parent_graph_name) const {
if (parent_node->attribute_size() != kIfNodeAttrSize) {
GELOGE(FAILED, "[Parse][Node] Invalid graph, if node attribute size:%d must be 2.", parent_node->attribute_size());
REPORT_INNER_ERROR("E19999", "Invalid graph, if node attribute size:%d must be 2.", parent_node->attribute_size());


parser/onnx/subgraph_adapter/if_subgraph_adapter.h (+1, -1)

@@ -32,7 +32,7 @@ class PARSER_FUNC_VISIBILITY IfSubgraphAdapter : public SubgraphAdapter {
private:
domi::Status ParseIfNodeSubgraphs(ge::onnx::NodeProto *parent_node, std::vector<ge::onnx::GraphProto *> &onnx_graphs,
std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph,
- const std::string &parent_graph_name);
+ const std::string &parent_graph_name) const;
domi::Status GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, std::set<std::string> &all_inputs) const;
void AddInputNodeForGraph(const std::set<std::string> &all_inputs, ge::onnx::GraphProto &onnx_graph) const;
void AddInputForParentNode(const std::set<std::string> &all_inputs, ge::onnx::NodeProto &parent_node) const;


parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc (+1, -1)

@@ -59,7 +59,7 @@ Status TensorFlowFusionCustomParserAdapter::ParseParams(const vector<const NodeD
}

Status TensorFlowFusionCustomParserAdapter::ParseParams(const std::vector<ge::Operator> &v_input_const,
- ge::NodePtr &node) {
+ ge::NodePtr &node) const {
GE_CHECK_NOTNULL(node);
auto op_dest = node->GetOpDesc();
GE_CHECK_NOTNULL(op_dest);


parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h (+1, -1)

@@ -42,7 +42,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowFusionCustomParserAdapter : public Tensor
* @return FAILED parse failed
* @author
*/
- Status ParseParams(const std::vector<ge::Operator> &v_input_const, ge::NodePtr &node);
+ Status ParseParams(const std::vector<ge::Operator> &v_input_const, ge::NodePtr &node) const;
};
} // namespace ge



parser/tensorflow/tensorflow_parser.cc (+3, -3)

@@ -3059,7 +3059,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
GE_CHECK_NOTNULL(current_node);
for (const string &input_name : current_node->input()) {
string input_node_name = NodeNameFromInput(input_name);
- if (!delete_nodes.count(input_node_name)) {
+ if (delete_nodes.count(input_node_name) == 0U) {
next_inputs.insert(input_node_name);
}
}
@@ -3072,7 +3072,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
if (static_cast<bool>(input_nodes.count(node.name()))) {
*(filtered_graph_def.mutable_node()->Add()) = node;
}
- if (!delete_nodes.count(node.name())) {
+ if (delete_nodes.count(node.name()) == 0U) {
*(filtered_graph_def.mutable_node()->Add()) = node;
}
}
@@ -3135,7 +3135,7 @@ Status TensorFlowModelParser::TrimGraphByOutput(const domi::tensorflow::GraphDef
GE_CHECK_NOTNULL(current_node);
for (const string &input_name : current_node->input()) {
string input_node_name = NodeNameFromInput(input_name);
- if (!required_nodes.count(input_node_name)) {
+ if (required_nodes.count(input_node_name) == 0U) {
next_inputs.insert(input_node_name);
}
}
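
std::set::count() returns a size_t, so `!count(...)` relied on an implicit integer-to-bool conversion; `count(...) == 0U` states "not present" explicitly. A small sketch of the membership test used while trimming the graph (hypothetical helper, not the parser's actual function):

```cpp
#include <set>
#include <string>

// Collect the inputs that are not scheduled for deletion.
void CollectRemainingInputs(const std::set<std::string> &delete_nodes,
                            const std::set<std::string> &inputs,
                            std::set<std::string> &next_inputs) {
  for (const std::string &name : inputs) {
    // count() returns size_t; compare against 0U explicitly instead of `!count(...)`.
    if (delete_nodes.count(name) == 0U) {
      next_inputs.insert(name);
    }
  }
}
```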

