diff --git a/mindinsight/ui/src/assets/images/explain-fn.svg b/mindinsight/ui/src/assets/images/explain-fn.svg new file mode 100644 index 00000000..53915620 --- /dev/null +++ b/mindinsight/ui/src/assets/images/explain-fn.svg @@ -0,0 +1,35 @@ + + + + fn + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + FN + + + + \ No newline at end of file diff --git a/mindinsight/ui/src/assets/images/explain-forecast.svg b/mindinsight/ui/src/assets/images/explain-forecast.svg new file mode 100644 index 00000000..9c63188b --- /dev/null +++ b/mindinsight/ui/src/assets/images/explain-forecast.svg @@ -0,0 +1,33 @@ + + + + yuce + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mindinsight/ui/src/assets/images/explain-fp.svg b/mindinsight/ui/src/assets/images/explain-fp.svg new file mode 100644 index 00000000..57ae612d --- /dev/null +++ b/mindinsight/ui/src/assets/images/explain-fp.svg @@ -0,0 +1,35 @@ + + + + fp + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + FP + + + + \ No newline at end of file diff --git a/mindinsight/ui/src/assets/images/explain-reality.svg b/mindinsight/ui/src/assets/images/explain-reality.svg new file mode 100644 index 00000000..54434120 --- /dev/null +++ b/mindinsight/ui/src/assets/images/explain-reality.svg @@ -0,0 +1,33 @@ + + + + 真实 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mindinsight/ui/src/assets/images/explain-tn.svg b/mindinsight/ui/src/assets/images/explain-tn.svg new file mode 100644 index 00000000..36dc439a --- /dev/null +++ b/mindinsight/ui/src/assets/images/explain-tn.svg @@ -0,0 +1,35 @@ + + + + tn + Created with Sketch. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + TN + + + + \ No newline at end of file diff --git a/mindinsight/ui/src/assets/images/explain-tp.svg b/mindinsight/ui/src/assets/images/explain-tp.svg new file mode 100644 index 00000000..88fb2055 --- /dev/null +++ b/mindinsight/ui/src/assets/images/explain-tp.svg @@ -0,0 +1,35 @@ + + + + tp + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + TP + + + + \ No newline at end of file diff --git a/mindinsight/ui/src/components/benchmarkBarChart.vue b/mindinsight/ui/src/components/benchmarkBarChart.vue new file mode 100644 index 00000000..b4fc6fd7 --- /dev/null +++ b/mindinsight/ui/src/components/benchmarkBarChart.vue @@ -0,0 +1,200 @@ + + + + + + diff --git a/mindinsight/ui/src/components/header.vue b/mindinsight/ui/src/components/header.vue index a7acd253..3f86bc03 100644 --- a/mindinsight/ui/src/components/header.vue +++ b/mindinsight/ui/src/components/header.vue @@ -30,6 +30,7 @@ limitations under the License. mode="horizontal"> {{$t("summaryManage.summaryList")}} {{$t("debugger.debugger")}} + {{$t("explain.explain")}} @@ -189,8 +190,12 @@ export default { // get active menu item getActive() { const str = this.$route.path.split('/'); - if (str.length > 1 && str[1] === 'debugger') { - return this.$route.path; + if (str.length > 1) { + if (str[1] === 'debugger') { + return this.$route.path; + } else if (str[1] === 'explain') { + return `/${str[1]}`; + } } return '/summary-manage'; }, diff --git a/mindinsight/ui/src/components/superposeImg.vue b/mindinsight/ui/src/components/superposeImg.vue new file mode 100644 index 00000000..27d18c39 --- /dev/null +++ b/mindinsight/ui/src/components/superposeImg.vue @@ -0,0 +1,117 @@ + + + + diff --git a/mindinsight/ui/src/locales/en-us.json b/mindinsight/ui/src/locales/en-us.json index 7e1acbda..4cd0c717 100644 --- a/mindinsight/ui/src/locales/en-us.json +++ b/mindinsight/ui/src/locales/en-us.json @@ -494,7 +494,7 @@ "curValue": "Current Value", 
"compareToPre": "Compare with Previous Step", "stepTip": "(The value 0 indicates the initial training state.)", - "toSummeryList": "Go to Training List", + "toSummeryList": "Go to Summary List", "clientIp": "Client IP", "deviceId": "Device ID", "currentStep": "Step", @@ -508,6 +508,64 @@ "value": "Value", "largeDataTip": "The requested data is too large. Try another dimension.", "continueTo": "Continue to" + }, + "explain": { + "explain": "Model Explanation", + "explainSummary": "Explanation List", + "explainSummaryCurrentFolder": "Root path of the explanation log:", + "summaryPath": "Explanation Log Path", + "title": "Saliency Map Visualization", + "explainMethod": "Explanation Methods", + "viewScore": "View Score", + "fetch": "Filter", + "minConfidence": "Probability Threshold", + "imgSort": "Sort Images By", + "default": "Default", + "byProbability": "Probabilities in descending order", + "superposeImg": "Overlay on Original Image", + "originalPicture": "Original Image", + "forecastTag": "Prediction Tag", + "tag": "Tag", + "confidence": "Probability", + "forecastTagTip": "When the inference image has the correct tag, the following four flags are displayed in the tag row", + "TP": "TP: indicates true positive. The tag is a positive sample, and the classification is a positive sample;", + "FN": "FN: indicates false negative. The tag is a positive sample, and the classification is a negative sample;", + "FP": "FP: indicates false positive. The tag is a negative sample, and the classification is a positive sample;", + "TN": "TN: indicates true negative. The tag is a negative sample, and the classification is a negative sample;", + "mainTipTitle": "Function description:", + "mainTipPartOne":"This function visualizes the basis for model classification. 
After the to-be-explained model, the image, and the tag are selected, a contribution degree of each pixel in the original image to the selected tag is calculated by using an explanation method, and visualization is performed by using a saliency map similar to a heatmap. A brighter color indicates that the corresponding area contributes more to the selected tag of the model prediction. The darker the color, the smaller the contribution of the area to the selected tag.", + "mainTipPartTwo":"A saliency map helps you understand the features related to the specified tag during deep neural network inference. When the inference basis and expectation of a model are different, you can debug the model by referring to the saliency map so that the model can perform inference based on proper features.", + "mainTipPartThree":"For details about how to generate saliency maps, see section 3.2 'Local Methods' in", + "mainTipPartFour":"https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9050829", + "noExplainer":"Select Explanation Method" + }, + "metric": { + "scoreSystem": "Scoring System", + "comprehensive": "Comprehensive Assessment", + "classify": "Classification Assessment", + "singleMethod": "Single method and multiple indicators", + "multiMethod": "Single indicator and multiple methods", + "interpretation": "Explanation", + "measurement": "Measurement", + "seeInterpretation": "You are viewing scores of the explanation method ", + "seeMeasurement": "You are viewing scores of the measurement method", + "showGrade": "of different tag types", + "evaluationScore": "Assessment Score", + "weightAllocatgion": "Weight Configuration", + "compositeScore": "Comprehensive Score", + "metric": "Dimension", + "inputWeightScore": "Enter a weight score between 0 and 1", + "weightSumNotNull": "The sum of weights cannot be empty", + "weightError": "Weight calculation error", + "weightSum": "The sum of weights must be 1", + "radarChart": "Explanation Method Comparison Radar Chart", + 
"comprehensiveTooltip": "The comprehensive assessment provides configurable weights for different scoring dimensions and calculates the comprehensive scores to sort and filter the explanation methods. The explanation method has advantages and disadvantages in different scoring dimensions. You can refer to the description of each dimension, adjust the weight based on the actual requirements, and assess the explanation method with emphasis. By default, weights for each dimension are the same.", + "classifyTooltip": "Classification assessment groups datasets by tag and measures the explanation methods based on data with different tags.", + "scoreSystemtooltipOne": "Function description:", + "scoreSystemtooltiptwo": "The scoring system provides multiple dimensions for scoring explanation methods.", + "scoreSystemtooltipthree": "Scoring dimensions:", + "scoreSystemtooltipfour": " - Faithfulness: evaluates the faithfulness of the explanation result to the model, that is, whether the saliency map correctly reflects the model classification basis. Modify the corresponding positions in the original image according to a certain rule (for example, from bright to dark) and transfer the modified image to the model for inference in sequence. If the brighter the area of the saliency map is, the greater the impact on the selected tag is, the higher the faithfulness of the interpretation result is. Conversely, the explanation result may not reflect the inference basis of the model. Currently, there are three mainstream solutions: naive faithfulness, insertion AUC, and deletion AUC.", + "scoreSystemtooltipfive": " - Localization: uses the bounding box of the dataset to assess the positioning accuracy of the highlighted area in the saliency map. Select an objective and related tag. If the explanation method accurately highlights the area related to the selected tag in the image, the highlighted area should overlap the bounding box of the selected objective. 
Localization assesses the location capability of the explanation result by assessing the overlapped area of the highlighted area and bounding box." }, "error": { "50540000": "System error.", diff --git a/mindinsight/ui/src/locales/zh-cn.json b/mindinsight/ui/src/locales/zh-cn.json index df7fcf8e..dc21d8d3 100644 --- a/mindinsight/ui/src/locales/zh-cn.json +++ b/mindinsight/ui/src/locales/zh-cn.json @@ -508,6 +508,64 @@ "largeDataTip": "请求的数据过大,请使用其他维度重试。", "continueTo": "运行到该节点" }, + "explain": { + "explain": "模型解释", + "explainSummary": "解释列表", + "explainSummaryCurrentFolder": "解释日志根路径:", + "summaryPath": "解释日志路径", + "title": "显著图可视化", + "explainMethod": "解释方法", + "viewScore": "查看评分", + "fetch": "筛选", + "minConfidence": "概率阈值", + "imgSort": "图片排序", + "default": "默认", + "byProbability": "概率值降序", + "superposeImg": "叠加于原图", + "originalPicture": "原始图片", + "forecastTag": "预测标签", + "tag": "标签", + "confidence": "概率", + "forecastTagTip": "当推理图片带有正确标签,标签行会显示下列四种旗标", + "TP": "TP:代表True Positive,标签为正样本,分类为正样本;", + "FN": "FN:代表False Negative,标签为正样本,分类为负样本;", + "FP": "FP:代表False Positive,标签为负样本,分类为正样本;", + "TN": "TN:代表True Negative,标签为负样本,分类为负样本;", + "mainTipTitle": "功能说明:", + "mainTipPartOne":"本功能对模型分类的依据进行可视化。选定待解释模型、图片和标签后,解释方法计算得到原始图像中每个像素对选定标签的贡献度,以类似热力图的显著图进行可视。显著图颜色越亮,表示对应区域对于模型预测选定标签的贡献越多;颜色越暗,该区域对选定标签的贡献越小。", + "mainTipPartTwo":"显著图可以帮助我们了解深度神经网络推理时,和指定标签有关系的特征。当模型推理依据和期望不同时,可以参考显著图对模型进行调试,让模型依据合理的特征进行推理。", + "mainTipPartThree":"主流生成显著图的解释方法可以参考论文3.2节Local Methods:", + "mainTipPartFour":"https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9050829", + "noExplainer":"请选择解释方法" + }, + "metric": { + "scoreSystem": "评分体系", + "comprehensive": "综合评估", + "classify": "分类评估", + "singleMethod": "单方法多指标", + "multiMethod": "单指标多方法", + "interpretation": "解释方法", + "measurement": "度量方法", + "seeInterpretation": "您正在查看解释方法", + "seeMeasurement": "您正在查看度量方法", + "showGrade": "在不同标签类型上的分数", + "evaluationScore": "评价分数", + "weightAllocatgion": "权重配置", + "compositeScore": "综合得分", + 
"metric": "评分维度", + "inputWeightScore": "请输入权重分数(0-1)", + "weightSumNotNull": "权重总和不能为空", + "weightError": "权重计算出错", + "weightSum": "权重总和必须为1", + "radarChart": "解释方法对比雷达图", + "comprehensiveTooltip": "综合评估为不同评分维度提供可配置的权重,计算解释方法的综合得分,以便对解释方法进行排序筛选。解释方法在不同评分维度上各有优势,可以参考各个维度的介绍,根据实际需求对权重进行调整,有侧重地评估解释方法。本功能默认对各个维度设置相同的权重。", + "classifyTooltip": "分类评估将数据集按照标签进行分组,在不同标签的数据上分别度量解释方法。", + "scoreSystemtooltipOne": "功能说明:", + "scoreSystemtooltiptwo": "评分体系提供多个维度对解释方法进行评分。", + "scoreSystemtooltipthree": "评分维度:", + "scoreSystemtooltipfour": " -Faithfulness: 该维度评估解释结果对模型的忠实度,即显著图是否正确反映模型分类依据。给定显著图,按照一定规则(如从亮到暗)对原始图像中的相应位置进行修改,将修改后的图像传给模型依次进行推理,如果显著图越亮的区域对选定标签影响越大,则解释结果忠实度越高;反之,解释结果可能无法反映模型的推理依据。当前有三种主流方案度量解释的faithfulness:Naive Faithfulness,Insertion AUC,Deletion AUC。", + "scoreSystemtooltipfive": " -Localization: 该维度借助数据集的bounding box对显著图高亮区域的定位准确性进行评估,选定一组目标和相关标签,如果解释方法准确高亮了图像中和选定标签相关的区域,高亮区域和选定目标的bounding box应当有较大重合度。Localization通过评估高亮区域和bounding box的重合度,对解释结果的定位能力进行评估。" + }, "error": { "50540000": "系统错误", "50540001": "参数类型错误,请检查请求参数类型是否都符合要求", diff --git a/mindinsight/ui/src/router.js b/mindinsight/ui/src/router.js index 57a8655a..e8feee76 100644 --- a/mindinsight/ui/src/router.js +++ b/mindinsight/ui/src/router.js @@ -125,5 +125,18 @@ export default new Router({ path: '/debugger', component: () => import('./views/debugger/debugger.vue'), }, + { + path: '/explain', + component: () => import('./views/explain/summary-list.vue'), + }, + { + path: '/explain/saliency-map', + component: () => + import('./views/explain/saliency-map.vue'), + }, + { + path: '/explain/xai-metric', + component: () => import('./views/explain/xai-metric.vue'), + }, ], }); diff --git a/mindinsight/ui/src/services/fetcher.js b/mindinsight/ui/src/services/fetcher.js index 7b02921b..0206813c 100644 --- a/mindinsight/ui/src/services/fetcher.js +++ b/mindinsight/ui/src/services/fetcher.js @@ -78,12 +78,18 @@ axios.interceptors.response.use( regardError: ['50545013', '50545014', '5054500D'], }; + if 
(ignoreCode.ignoreError.includes(errorCode)) { + if (errorData[errorCode]) { + Vue.prototype.$message.error(errorData[errorCode]); + } + setTimeout(()=>{ + router.push('/'); + }, 3000); + return Promise.reject(error); + } if ( path.includes('-dashboard') || - ignoreCode.regardError.includes(errorCode) || - (ignoreCode.ignoreError.includes(errorCode) && - error.config.headers.ignoreError) - ) { + ignoreCode.regardError.includes(errorCode)) { return Promise.reject(error); } if (errorData[errorCode]) { diff --git a/mindinsight/ui/src/services/request-service.js b/mindinsight/ui/src/services/request-service.js index c71edd86..80e5b9a8 100644 --- a/mindinsight/ui/src/services/request-service.js +++ b/mindinsight/ui/src/services/request-service.js @@ -389,4 +389,43 @@ export default { data: params, }); }, + // explain list + getExplainList(params) { + return axios({ + method: 'get', + url: 'v1/mindinsight/explainer/explain-jobs', + params: params, + }); + }, + // Explain query train base information + queryTrainInfo(params) { + return axios({ + method: 'get', + url: '/v1/mindinsight/explainer/explain-job', + params: params, + }); + }, + // Explain query page table information + queryPageInfo(params) { + return axios({ + method: 'post', + url: '/v1/mindinsight/explainer/saliency', + data: params, + }); + }, + // Explain query similar pictures + querySimilarPic(params) { + return axios({ + method: 'get', + url: '/v1/mindinsight/explainer/similar', + params: params, + }); + }, + getEvaluation(params) { + return axios({ + method: 'get', + url: 'v1/mindinsight/explainer/evaluation', + params: params, + }); + }, }; diff --git a/mindinsight/ui/src/views/explain/saliency-map.vue b/mindinsight/ui/src/views/explain/saliency-map.vue new file mode 100644 index 00000000..61e1c48b --- /dev/null +++ b/mindinsight/ui/src/views/explain/saliency-map.vue @@ -0,0 +1,1136 @@ + + + + + + + diff --git a/mindinsight/ui/src/views/explain/summary-list.vue 
b/mindinsight/ui/src/views/explain/summary-list.vue new file mode 100644 index 00000000..67c55475 --- /dev/null +++ b/mindinsight/ui/src/views/explain/summary-list.vue @@ -0,0 +1,490 @@ + + + + + + diff --git a/mindinsight/ui/src/views/explain/xai-metric.vue b/mindinsight/ui/src/views/explain/xai-metric.vue new file mode 100644 index 00000000..63503a31 --- /dev/null +++ b/mindinsight/ui/src/views/explain/xai-metric.vue @@ -0,0 +1,827 @@ + + + + + +