@@ -246,7 +246,7 @@ func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, q
}
log.Info("query return total:" + fmt.Sprint(allCount))
userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0)
if err := statictisSess.Table(tableName).Where(cond).OrderBy("commit_count desc,id desc").Limit(pageSize, start).
if err := statictisSess.Table(tableName).Where(cond).OrderBy("user_index desc,id desc").Limit(pageSize, start).
Find(&userBusinessAnalysisAllList); err != nil {
return nil, 0
}
@@ -448,6 +448,9 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
var indexTotal int64
indexTotal = 0
insertCount := 0
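// userIndexMap plus the running max/min below feed the min-max normalization of user_index performed after the batch insert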
userIndexMap := make(map[int64]float64, 0)
maxUserIndex := 0.0
minUserIndex := 100000000.0
dateRecordBatch := make([]UserBusinessAnalysisAll, 0)
for {
sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
@@ -494,7 +497,13 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap)
dateRecordAll.CommitModelCount = getMapValue(dateRecordAll.ID, AiModelManageMap)
dateRecordAll.UserIndex = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight)
userIndexMap[dateRecordAll.ID] = dateRecordAll.UserIndex
if maxUserIndex < dateRecordAll.UserIndex {
maxUserIndex = dateRecordAll.UserIndex
}
if minUserIndex > dateRecordAll.UserIndex {
minUserIndex = dateRecordAll.UserIndex
}
dateRecordBatch = append(dateRecordBatch, dateRecordAll)
if len(dateRecordBatch) >= BATCH_INSERT_SIZE {
insertTable(dateRecordBatch, tableName, statictisSess)
@@ -523,9 +532,19 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
}
}
//normalization: min-max scale user_index into [0,1]; skip the division when every value is identical
indexRange := maxUserIndex - minUserIndex
for k, v := range userIndexMap {
tmpResult := 0.0
if indexRange > 0 {
tmpResult = (v - minUserIndex) / indexRange
}
updateUserIndex(tableName, statictisSess, k, tmpResult)
}
log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount))
}
func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) {
updateSql := "UPDATE public." + tableName + " set user_index=" + fmt.Sprint(userIndex) + " where id=" + fmt.Sprint(userId)
if _, err := statictisSess.Exec(updateSql); err != nil {
log.Error("update user_index failed. userId=" + fmt.Sprint(userId) + " err=" + err.Error())
}
}
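// A minimal alternative sketch (not part of this change): the same update issued with bound
// parameters, assuming xorm's Session.Exec accepts a SQL string followed by "?" arguments.
// Table names cannot be bound, so tableName is still concatenated; the helper name
// updateUserIndexPrepared is illustrative only.
func updateUserIndexPrepared(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) {
updateSql := "UPDATE public." + tableName + " set user_index=? where id=?"
if _, err := statictisSess.Exec(updateSql, userIndex, userId); err != nil {
log.Error("update user_index failed:", err.Error())
}
}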
func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, statictisSess *xorm.Session) {
insertBatchSql := "INSERT INTO public." + tableName +
@@ -50,6 +50,7 @@ func SendDecompressAttachToLabelOBS(attach string) error {
_, err := redisclient.Do("Publish", setting.DecompressOBSTaskName, attach)
if err != nil {
log.Critical("redis Publish failed: %v", err)
return err
}
log.Info("LabelDecompressOBSQueue(%s) success", attach)
@@ -438,6 +438,7 @@ var (
//home page
RecommentRepoAddr string
ESSearchURL string
INDEXPOSTFIX string
//notice config
UserNameOfNoticeRepo string
RepoNameOfNoticeRepo string
@@ -1268,6 +1269,7 @@ func NewContext() {
sec = Cfg.Section("homepage")
RecommentRepoAddr = sec.Key("Address").MustString("https://git.openi.org.cn/OpenIOSSG/promote/raw/branch/master/")
ESSearchURL = sec.Key("ESSearchURL").MustString("http://192.168.207.94:9200")
INDEXPOSTFIX = sec.Key("INDEXPOSTFIX").MustString("")
sec = Cfg.Section("notice")
UserNameOfNoticeRepo = sec.Key("USER_NAME").MustString("OpenIOSSG")
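For reference, a hypothetical app.ini fragment exercising the new key; the suffix value is illustrative only, and the key falls back to an empty string when omitted:

[homepage]
ESSearchURL = http://192.168.207.94:9200
INDEXPOSTFIX = -test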
@@ -30,6 +30,8 @@ type FileInfo struct {
}
type FileInfoList []FileInfo
const MAX_LIST_PARTS = 1000
func (ulist FileInfoList) Swap(i, j int) { ulist[i], ulist[j] = ulist[j], ulist[i] }
func (ulist FileInfoList) Len() int { return len(ulist) }
func (ulist FileInfoList) Less(i, j int) bool {
@@ -97,29 +99,48 @@ func CompleteObsMultiPartUpload(uuid, uploadID, fileName string) error {
input.Bucket = setting.Bucket
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
input.UploadId = uploadID
output, err := ObsCli.ListParts(&obs.ListPartsInput{
Bucket: setting.Bucket,
Key: input.Key,
UploadId: uploadID,
})
if err != nil {
log.Error("ListParts failed:", err.Error())
return err
}
for _, partInfo := range output.Parts {
input.Parts = append(input.Parts, obs.Part{
PartNumber: partInfo.PartNumber,
ETag: partInfo.ETag,
partNumberMarker := 0
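// page through the uploaded parts MAX_LIST_PARTS at a time; a page shorter than MaxParts marks the last page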
for {
output, err := ObsCli.ListParts(&obs.ListPartsInput{
Bucket: setting.Bucket,
Key: input.Key,
UploadId: uploadID,
MaxParts: MAX_LIST_PARTS,
PartNumberMarker: partNumberMarker,
})
if err != nil {
log.Error("ListParts failed:", err.Error())
return err
}
partNumberMarker = output.NextPartNumberMarker
log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", uuid, output.MaxParts, output.PartNumberMarker, output.NextPartNumberMarker, len(output.Parts))
for _, partInfo := range output.Parts {
input.Parts = append(input.Parts, obs.Part{
PartNumber: partInfo.PartNumber,
ETag: partInfo.ETag,
})
}
if len(output.Parts) < output.MaxParts {
break
}
}
_, err = ObsCli.CompleteMultipartUpload(input)
output, err := ObsCli.CompleteMultipartUpload(input)
if err != nil {
log.Error("CompleteMultipartUpload failed:", err.Error())
return err
}
log.Info("uuid:%s, RequestId:%s", uuid, output.RequestId)
return nil
}
@@ -78,7 +78,7 @@ func UploadAttachmentUI(ctx *context.Context) {
}
func EditAttachmentUI(ctx *context.Context) {
id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
ctx.Data["PageIsDataset"] = true
attachment, _ := models.GetAttachmentByID(id)
@@ -986,23 +986,29 @@ func HandleUnDecompressAttachment() {
if attach.Type == models.TypeCloudBrainOne {
err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
if err != nil {
log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
} else {
attach.DecompressState = models.DecompressStateIng
err = models.UpdateAttachment(attach)
if err != nil {
log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
}
updateAttachmentDecompressStateIng(attach)
}
} else if attach.Type == models.TypeCloudBrainTwo {
attachjson, _ := json.Marshal(attach)
labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
if err != nil {
log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error())
} else {
updateAttachmentDecompressStateIng(attach)
}
}
}
return
}
func updateAttachmentDecompressStateIng(attach *models.Attachment) {
attach.DecompressState = models.DecompressStateIng
err := models.UpdateAttachment(attach)
if err != nil {
log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
}
}
func QueryAllPublicDataset(ctx *context.Context) {
attachs, err := models.GetAllPublicAttachments()
@@ -41,24 +41,24 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
"A1": ctx.Tr("user.static.id"),
"B1": ctx.Tr("user.static.name"),
"C1": ctx.Tr("user.static.codemergecount"),
"D1": ctx.Tr("user.static.commitcount"),
"E1": ctx.Tr("user.static.issuecount"),
"F1": ctx.Tr("user.static.commentcount"),
"G1": ctx.Tr("user.static.focusrepocount"),
"H1": ctx.Tr("user.static.starrepocount"),
"I1": ctx.Tr("user.static.logincount"),
"J1": ctx.Tr("user.static.watchedcount"),
"K1": ctx.Tr("user.static.commitcodesize"),
"L1": ctx.Tr("user.static.solveissuecount"),
"M1": ctx.Tr("user.static.encyclopediascount"),
"N1": ctx.Tr("user.static.createrepocount"),
"O1": ctx.Tr("user.static.openiindex"),
"P1": ctx.Tr("user.static.registdate"),
"Q1": ctx.Tr("user.static.CloudBrainTaskNum"),
"R1": ctx.Tr("user.static.CloudBrainRunTime"),
"S1": ctx.Tr("user.static.CommitDatasetNum"),
"T1": ctx.Tr("user.static.CommitModelCount"),
"U1": ctx.Tr("user.static.UserIndex"),
"D1": ctx.Tr("user.static.UserIndex"),
"E1": ctx.Tr("user.static.commitcount"),
"F1": ctx.Tr("user.static.issuecount"),
"G1": ctx.Tr("user.static.commentcount"),
"H1": ctx.Tr("user.static.focusrepocount"),
"I1": ctx.Tr("user.static.starrepocount"),
"J1": ctx.Tr("user.static.logincount"),
"K1": ctx.Tr("user.static.watchedcount"),
"L1": ctx.Tr("user.static.commitcodesize"),
"M1": ctx.Tr("user.static.solveissuecount"),
"N1": ctx.Tr("user.static.encyclopediascount"),
"O1": ctx.Tr("user.static.createrepocount"),
"P1": ctx.Tr("user.static.openiindex"),
"Q1": ctx.Tr("user.static.registdate"),
"R1": ctx.Tr("user.static.CloudBrainTaskNum"),
"S1": ctx.Tr("user.static.CloudBrainRunTime"),
"T1": ctx.Tr("user.static.CommitDatasetNum"),
"U1": ctx.Tr("user.static.CommitModelCount"),
"V1": ctx.Tr("user.static.countdate"),
}
for k, v := range dataHeader {
@@ -78,28 +78,27 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount)
xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount)
xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount)
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount)
xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount)
xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount)
xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount)
xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize)
xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount)
xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount)
xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount)
xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount)
xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount)
xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount)
xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount)
xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize)
xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount)
xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount)
xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount)
xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3])
xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum)
xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum)
xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount)
xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
xlsx.SetCellValue(sheetName, "Q"+rows, formatTime[0:len(formatTime)-3])
xlsx.SetCellValue(sheetName, "R"+rows, userRecord.CloudBrainTaskNum)
xlsx.SetCellValue(sheetName, "S"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitDatasetNum)
xlsx.SetCellValue(sheetName, "U"+rows, userRecord.CommitModelCount)
formatTime = userRecord.DataDate
xlsx.SetCellValue(sheetName, "V"+rows, formatTime)
}
@@ -243,24 +242,24 @@ func QueryUserStaticDataPage(ctx *context.Context) {
"A1": ctx.Tr("user.static.id"),
"B1": ctx.Tr("user.static.name"),
"C1": ctx.Tr("user.static.codemergecount"),
"D1": ctx.Tr("user.static.commitcount"),
"E1": ctx.Tr("user.static.issuecount"),
"F1": ctx.Tr("user.static.commentcount"),
"G1": ctx.Tr("user.static.focusrepocount"),
"H1": ctx.Tr("user.static.starrepocount"),
"I1": ctx.Tr("user.static.logincount"),
"J1": ctx.Tr("user.static.watchedcount"),
"K1": ctx.Tr("user.static.commitcodesize"),
"L1": ctx.Tr("user.static.solveissuecount"),
"M1": ctx.Tr("user.static.encyclopediascount"),
"N1": ctx.Tr("user.static.createrepocount"),
"O1": ctx.Tr("user.static.openiindex"),
"P1": ctx.Tr("user.static.registdate"),
"Q1": ctx.Tr("user.static.CloudBrainTaskNum"),
"R1": ctx.Tr("user.static.CloudBrainRunTime"),
"S1": ctx.Tr("user.static.CommitDatasetNum"),
"T1": ctx.Tr("user.static.CommitModelCount"),
"U1": ctx.Tr("user.static.UserIndex"),
"D1": ctx.Tr("user.static.UserIndex"),
"E1": ctx.Tr("user.static.commitcount"),
"F1": ctx.Tr("user.static.issuecount"),
"G1": ctx.Tr("user.static.commentcount"),
"H1": ctx.Tr("user.static.focusrepocount"),
"I1": ctx.Tr("user.static.starrepocount"),
"J1": ctx.Tr("user.static.logincount"),
"K1": ctx.Tr("user.static.watchedcount"),
"L1": ctx.Tr("user.static.commitcodesize"),
"M1": ctx.Tr("user.static.solveissuecount"),
"N1": ctx.Tr("user.static.encyclopediascount"),
"O1": ctx.Tr("user.static.createrepocount"),
"P1": ctx.Tr("user.static.openiindex"),
"Q1": ctx.Tr("user.static.registdate"),
"R1": ctx.Tr("user.static.CloudBrainTaskNum"),
"S1": ctx.Tr("user.static.CloudBrainRunTime"),
"T1": ctx.Tr("user.static.CommitDatasetNum"),
"U1": ctx.Tr("user.static.CommitModelCount"),
"V1": ctx.Tr("user.static.countdate"),
}
for k, v := range dataHeader {
@@ -274,26 +273,26 @@ func QueryUserStaticDataPage(ctx *context.Context) {
xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount)
xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount)
xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount)
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount)
xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount)
xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount)
xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount)
xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize)
xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount)
xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount)
xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount)
xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount)
xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount)
xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount)
xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount)
xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize)
xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount)
xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount)
xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount)
xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3])
xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum)
xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum)
xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount)
xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
xlsx.SetCellValue(sheetName, "Q"+rows, formatTime[0:len(formatTime)-3])
xlsx.SetCellValue(sheetName, "R"+rows, userRecord.CloudBrainTaskNum)
xlsx.SetCellValue(sheetName, "S"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitDatasetNum)
xlsx.SetCellValue(sheetName, "U"+rows, userRecord.CommitModelCount)
formatTime = userRecord.DataDate
xlsx.SetCellValue(sheetName, "V"+rows, formatTime)
}
@@ -68,23 +68,23 @@ func SearchApi(ctx *context.Context) {
if OnlySearchLabel {
searchRepoByLabel(ctx, Key, Page, PageSize)
} else {
searchRepo(ctx, "repository-es-index", Key, Page, PageSize, OnlyReturnNum)
searchRepo(ctx, "repository-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum)
}
return
} else if TableName == "issue" {
searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "f")
searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "f")
return
} else if TableName == "user" {
searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, true, OnlyReturnNum)
searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, true, OnlyReturnNum)
return
} else if TableName == "org" {
searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, false, OnlyReturnNum)
searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, false, OnlyReturnNum)
return
} else if TableName == "dataset" {
searchDataSet(ctx, "dataset-es-index", Key, Page, PageSize, OnlyReturnNum)
searchDataSet(ctx, "dataset-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum)
return
} else if TableName == "pr" {
searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "t")
searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "t")
//searchPR(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum)
return
}
@@ -31,7 +31,7 @@
<span slot="label">
<el-image style="width: 13px; height: 13px" src="/img/pro_rgb.svg">
</el-image>
云脑分析
云脑分析(建设中..)
</span>
</el-tab-pane>
</el-tabs>
@@ -26,7 +26,7 @@ import qs from 'qs';
import createDropzone from '../features/dropzone.js';
const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config;
// const uploadtype = 0;
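// chunk size for multipart uploads, hoisted to module scope (previously declared separately inside each upload path)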
const chunkSize = 1024 * 1024 * 64;
export default {
props:{
@@ -137,7 +137,6 @@ export default {
resetStatus() {
this.progress = 0;
this.status = '';
console.log(this.uploadtype)
},
updateProgress(file, progress) {
console.log("progress---",progress)
@@ -165,7 +164,6 @@ export default {
.getElementById('datasetId')
.getAttribute('datasetId');
this.resetStatus();
console.log(this.file,!this.file?.upload)
if(!this.file?.upload){
this.btnFlag = false
return
@@ -186,7 +184,6 @@ export default {
File.prototype.slice ||
File.prototype.mozSlice ||
File.prototype.webkitSlice,
chunkSize = 1024 * 1024 * 64,
chunks = Math.ceil(file.size / chunkSize),
spark = new SparkMD5.ArrayBuffer(),
fileReader = new FileReader();
@@ -327,7 +324,6 @@ export default {
},
async newMultiUpload(file) {
console.log(this.uploadtype,this)
const res = await axios.get('/attachments/new_multipart', {
params: {
totalChunkCounts: file.totalChunkCounts,
@@ -348,7 +344,6 @@ export default {
File.prototype.slice ||
File.prototype.mozSlice ||
File.prototype.webkitSlice,
chunkSize = 1024 * 1024 * 32,
chunks = Math.ceil(file.size / chunkSize),
fileReader = new FileReader(),
time = new Date().getTime();
@@ -457,7 +452,6 @@ export default {
}
async function completeUpload() {
console.log(_this.uploadtype)
return await axios.post(
'/attachments/complete_multipart',
qs.stringify({
@@ -494,7 +488,6 @@ export default {
1}/${chunks}个分片上传`
);
this.progress = Math.ceil((currentChunk / chunks) * 100);
console.log("((currentChunk / chunks) * 100).toFixed(2)",((currentChunk / chunks) * 100).toFixed(2))
this.updateProgress(file, ((currentChunk / chunks) * 100).toFixed(2));
this.status = `${this.dropzoneParams.data('uploading')} ${(
(currentChunk / chunks) *
@@ -67,6 +67,15 @@
label="PR数"
align="center">
</el-table-column>
<el-table-column
prop="UserIndex"
label="用户指数"
width="120px"
align="center">
<template slot-scope="scope">
{{scope.row.UserIndex | rounding}}
</template>
</el-table-column>
<el-table-column
prop="CommitCount"
label="commit数"
@@ -161,15 +170,7 @@
width="120px"
align="center">
</el-table-column>
<el-table-column
prop="UserIndex"
label="用户指数"
width="120px"
align="center">
<template slot-scope="scope">
{{scope.row.UserIndex | rounding}}
</template>
</el-table-column>
<el-table-column
prop="DataDate"
label="系统统计时间"