
!799 UI: Optimize strings in the i18n files and fix indentation problems

Merge pull request !799 from 夏易凡/0908master
tags/v1.1.0
mindspore-ci-bot committed 5 years ago
commit 41419e41d8
3 changed files with 18 additions and 21 deletions
  1. mindinsight/ui/src/components/benchmarkBarChart.vue (+0, -1)
  2. mindinsight/ui/src/locales/en-us.json (+8, -9)
  3. mindinsight/ui/src/locales/zh-cn.json (+10, -11)
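
For context: the two JSON files are the UI's locale bundles. A minimal sketch of how such files are typically wired up, assuming vue-i18n (the usual choice for a Vue frontend; the entry-file details and default locale below are assumptions, not taken from this PR):

import Vue from 'vue';
import VueI18n from 'vue-i18n';
// Paths taken from this PR; the entry file that imports them is assumed.
import enUs from './locales/en-us.json';
import zhCn from './locales/zh-cn.json';

Vue.use(VueI18n);

const i18n = new VueI18n({
  locale: 'zh-cn',          // assumed default
  fallbackLocale: 'en-us',
  messages: { 'en-us': enUs, 'zh-cn': zhCn },
});

// A component then resolves the keys touched by this PR, e.g.:
//   this.$t('explain.noExplainer')
//   -> "Select Explanation Method" (en-us) / "请选择解释方法" (zh-cn)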

mindinsight/ui/src/components/benchmarkBarChart.vue (+0, -1)

@@ -187,7 +187,6 @@ export default {
series: [],
};
},
},
};
</script>
-

mindinsight/ui/src/locales/en-us.json (+8, -9)

@@ -509,7 +509,7 @@
"largeDataTip": "The requested data is too large. Try another dimension.",
"continueTo": "Continue to"
},
"explain": {
"explain": {
"explain": "Model Explanation",
"explainSummary": "Explanation List",
"explainSummaryCurrentFolder": "Root path of the explanation log:",
@@ -533,11 +533,11 @@
"FP": "FP: indicates false positive. The tag is a negative sample, and the classification is a positive sample;",
"TN": "TN: indicates true negative. The tag is a negative sample, and the classification is a negative sample;",
"mainTipTitle": "Function description:",
"mainTipPartOne":"This function visualizes the basis for model classification. After the to-be-explained model, the image, and the tag are selected, a contribution degree of each pixel in the original image to the selected tag is calculated by using an explanation method, and visualization is performed by using a saliency map similar to a heatmap. A brighter color indicates that the corresponding area contributes more to the selected tag of the model prediction. The darker the color, the smaller the contribution of the area to the selected tag.",
"mainTipPartTwo":"A saliency map helps you understand the features related to the specified tag during deep neural network inference. When the inference basis and expectation of a model are different, you can debug the model by referring to the saliency map so that the model can perform inference based on proper features.",
"mainTipPartThree":"For details about how to generate saliency maps, see section 3.2 'Local Methods' in",
"mainTipPartFour":"https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9050829",
"noExplainer":"Select Explanation Method"
"mainTipPartOne": "This function visualizes the basis for model classification. After the to-be-explained model, the image, and the tag are selected, a contribution degree of each pixel in the original image to the selected tag is calculated by using an explanation method, and visualization is performed by using a saliency map similar to a heatmap. A brighter color indicates that the corresponding area contributes more to the selected tag of the model prediction. The darker the color, the smaller the contribution of the area to the selected tag.",
"mainTipPartTwo": "A saliency map helps you understand the features related to the specified tag during deep neural network inference. When the inference basis and expectation of a model are different, you can debug the model by referring to the saliency map so that the model can perform inference based on proper features.",
"mainTipPartThree": "For details about how to generate saliency maps, see section 3.2 'Local Methods' in",
"mainTipPartFour": "https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9050829",
"noExplainer": "Select Explanation Method"
},
"metric": {
"scoreSystem": "Scoring System",
@@ -559,7 +559,7 @@
"weightError": "Weight calculation error",
"weightSum": "The sum of weights must be 1",
"radarChart": "Explanation Method Comparison Radar Chart",
"comprehensiveTooltip": "The comprehensive assessment provides configurable weights for different scoring dimensions and calculates the comprehensive scores to sort and filter the explanation methods. The explanation method has advantages and disadvantages in different scoring dimensions. You can refer to the description of each dimension, adjust the weight based on the actual requirements, and assess the explanation method with emphasis. By default, weights for each dimension are the same.",
"comprehensiveTooltip": "The page of comprehensive assessment provides configurable weights for different scoring dimensions and calculates the comprehensive scores to sort and filter the explanation methods. The explanation method has advantages and disadvantages in different scoring dimensions. You can refer to the description of each dimension, adjust the weight based on the actual requirements, and assess the explanation method with emphasis. By default, weights for each dimension are the same.",
"classifyTooltip": "Classification assessment groups datasets by tag and measures the explanation methods based on data with different tags.",
"scoreSystemtooltipOne": "Function description:",
"scoreSystemtooltiptwo": "The scoring system provides multiple dimensions for scoring explanation methods.",
@@ -593,7 +593,6 @@
"50545013": "The requested data is too large. Try another dimension.",
"50545014": "The queried tensor data has been replaced by new data. Please refresh.",
"50548001": "Ascend AI Processor information query timed out.",

"5054B080": "Incorrect parameter type. Please check the input parameter type.",
"5054B081": "Incorrect parameter value. Please check the input parameter.",
"5054B180": "Failed to create the watchpoint. Please stop training and try again.",
@@ -602,4 +601,4 @@
"5054B183": "Backend training is in progress or has ended. Please try again later",
"5054B184": "The operation is too fast, the backend service has been suspended."
}
- }
+ }

mindinsight/ui/src/locales/zh-cn.json (+10, -11)

@@ -526,19 +526,19 @@
"forecastTag": "预测标签",
"tag": "标签",
"confidence": "概率",
"forecastTagTip": "当推理图片带有正确标签,标签行会显示下列四种旗标",
"forecastTagTip": "当推理图片带有正确标签,标签行会显示下列四种旗标",
"TP": "TP:代表Ture Positive,标签为正样本,分类为正样本;",
"FN": "FN:代表False Negative,标签为正样本,分类为负样本;",
"FP": "FP:代表Fasle Positive,标签为负样本,分类为正样本;",
"TN": "TN:代表Ture Negative,标签为负样本,分类为负样本;",
"mainTipTitle": "功能说明:",
"mainTipPartOne":"本功能对模型分类的依据进行可视化。选定待解释模型、图片和标签后,解释方法计算得到原始图像中每个像素对选定标签的贡献度,以类似热力图的显著图进行可视。显著图颜色越亮,表示对应区域对于模型预测选定标签的贡献越多;颜色越暗,该区域对选定标签的贡献越小。",
"mainTipPartTwo":"显著图可以帮助我们了解深度神经网络推理时,和指定标签有关系的特征。当模型推理依据和期望不同时,可以参考显著图对模型进行调试,让模型依据合理的特征进行推理。",
"mainTipPartThree":"主流生成显著图的解释方法可以参考论文3.2节Local Methods:",
"mainTipPartFour":"https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9050829",
"noExplainer":"请选择解释方法"
},
"metric": {
"mainTipPartOne": "本功能对模型分类的依据进行可视化。选定待解释模型、图片和标签后,解释方法计算得到原始图像中每个像素对选定标签的贡献度,以类似热力图的显著图进行可视。显著图颜色越亮,表示对应区域对于模型预测选定标签的贡献越多;颜色越暗,该区域对选定标签的贡献越小。",
"mainTipPartTwo": "显著图可以帮助我们了解深度神经网络推理时,和指定标签有关系的特征。当模型推理依据和期望不同时,可以参考显著图对模型进行调试,让模型依据合理的特征进行推理。",
"mainTipPartThree": "主流生成显著图的解释方法可以参考论文3.2节Local Methods:",
"mainTipPartFour": "https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9050829",
"noExplainer": "请选择解释方法"
},
"metric": {
"scoreSystem": "评分体系",
"comprehensive": "综合评估",
"classify": "分类评估",
@@ -558,7 +558,7 @@
"weightError": "权重计算出错",
"weightSum": "权重总和必须为1",
"radarChart": "解释方法对比雷达图",
"comprehensiveTooltip": "综合评估为不同评分维度提供可配置的权重,计算解释方法的综合得分,以便对解释方法进行排序筛选。解释方法在不同评分维度上各有优势,可以参考各个维度的介绍,根据实际需求对权重进行调整,有侧重地评估解释方法。本功能默认对各个维度设置相同的权重。",
"comprehensiveTooltip": "综合评估页面为不同评分维度提供可配置的权重,计算解释方法的综合得分,以便对解释方法进行排序筛选。解释方法在不同评分维度上各有优势,可以参考各个维度的介绍,根据实际需求对权重进行调整,有侧重地评估解释方法。本功能默认对各个维度设置相同的权重。",
"classifyTooltip": "分类评估将数据集按照标签进行分组,在不同标签的数据上分别度量解释方法。",
"scoreSystemtooltipOne": "功能说明:",
"scoreSystemtooltiptwo": "评分体系提供多个维度对解释方法进行评分。",
@@ -592,7 +592,6 @@
"50545013": "请求的数据过大,请使用其他维度重试。",
"50545014": "查询的张量数据已被新数据替换,请刷新。",
"50548001": "昇腾AI处理器信息查询超时",

"5054B080": "参数类型错误,请检查输入参数类型",
"5054B081": "参数值错误,请检查输入参数",
"5054B180": "监测点创建失败,请暂停训练后重试",
@@ -601,4 +600,4 @@
"5054B183": "后台训练运行中,请稍后重试",
"5054B184": "操作过快,后台服务已暂停。"
}
- }
+ }
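
Changes like this one have to land in both locale files in step: every key in en-us.json needs a counterpart in zh-cn.json. A small parity check can catch keys that drift between the two bundles. This is a hypothetical helper, not part of this PR; only the file paths are taken from the diff:

// check_locales.js: hypothetical helper that reports i18n keys
// present in one locale file but missing from the other.
const fs = require('fs');

// Flatten nested locale objects into dotted key paths, e.g. "explain.noExplainer".
function flatten(obj, prefix = '', out = new Set()) {
  for (const [key, value] of Object.entries(obj)) {
    const path = prefix ? `${prefix}.${key}` : key;
    if (value && typeof value === 'object') {
      flatten(value, path, out);
    } else {
      out.add(path);
    }
  }
  return out;
}

const en = flatten(JSON.parse(fs.readFileSync('mindinsight/ui/src/locales/en-us.json', 'utf8')));
const zh = flatten(JSON.parse(fs.readFileSync('mindinsight/ui/src/locales/zh-cn.json', 'utf8')));

for (const key of en) if (!zh.has(key)) console.log(`missing in zh-cn: ${key}`);
for (const key of zh) if (!en.has(key)) console.log(`missing in en-us: ${key}`);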
