diff --git a/models/cloudbrain.go b/models/cloudbrain.go
index ad0a6c570..9d180a848 100755
--- a/models/cloudbrain.go
+++ b/models/cloudbrain.go
@@ -112,7 +112,7 @@ type Cloudbrain struct {
 	SubTaskName string
 	ContainerID string
 	ContainerIp string
-	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+	CreatedUnix timeutil.TimeStamp `xorm:"INDEX"`
 	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
 	Duration int64 `xorm:"DEFAULT 0"` //运行时长 单位秒
 	TrainJobDuration string `xorm:"DEFAULT '00:00:00'"`
@@ -185,6 +185,12 @@ func (task *Cloudbrain) ComputeAndSetDuration() {
 	task.TrainJobDuration = ConvertDurationToStr(d)
 }
 
+func (task *Cloudbrain) CorrectCreateUnix() {
+	if task.StartTime > 0 && task.CreatedUnix > task.StartTime {
+		task.CreatedUnix = task.StartTime
+	}
+}
+
 func (task *Cloudbrain) IsTerminal() bool {
 	status := task.Status
 	return status == string(ModelArtsTrainJobCompleted) || status == string(ModelArtsTrainJobFailed) || status == string(ModelArtsTrainJobKilled) || status == string(ModelArtsStopped) || status == string(JobStopped) || status == string(JobFailed) || status == string(JobSucceeded)
@@ -219,6 +225,7 @@ func ParseAndSetDurationFromCloudBrainOne(result JobResultPayload, task *Cloudbr
 			task.EndTime = timeutil.TimeStamp(result.JobStatus.CompletedTime / 1000)
 		}
 	}
+	task.CorrectCreateUnix()
 	task.ComputeAndSetDuration()
 }
 
@@ -1473,7 +1480,7 @@ func UpdateTrainJobVersion(job *Cloudbrain) error {
 func updateJobTrainVersion(e Engine, job *Cloudbrain) error {
 	var sess *xorm.Session
 	sess = e.Where("job_id = ? AND version_name=?", job.JobID, job.VersionName)
-	_, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time").Update(job)
+	_, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job)
 	return err
 }
 
@@ -1562,7 +1569,7 @@ func UpdateInferenceJob(job *Cloudbrain) error {
 func updateInferenceJob(e Engine, job *Cloudbrain) error {
 	var sess *xorm.Session
 	sess = e.Where("job_id = ?", job.JobID)
-	_, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time").Update(job)
+	_, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job)
 	return err
 }
 func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) {
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go
index 65ce642d5..a3a68b1ab 100644
--- a/models/user_business_analysis.go
+++ b/models/user_business_analysis.go
@@ -246,7 +246,7 @@ func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, q
 	}
 	log.Info("query return total:" + fmt.Sprint(allCount))
 	userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0)
-	if err := statictisSess.Table(tableName).Where(cond).OrderBy("commit_count desc,id desc").Limit(pageSize, start).
+	if err := statictisSess.Table(tableName).Where(cond).OrderBy("user_index desc,id desc").Limit(pageSize, start).
 		Find(&userBusinessAnalysisAllList); err != nil {
 		return nil, 0
 	}
@@ -448,6 +448,9 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
 	var indexTotal int64
 	indexTotal = 0
 	insertCount := 0
+	userIndexMap := make(map[int64]float64, 0)
+	maxUserIndex := 0.0
+	minUserIndex := 100000000.0
 	dateRecordBatch := make([]UserBusinessAnalysisAll, 0)
 	for {
 		sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
@@ -494,7 +497,13 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
 		dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap)
 		dateRecordAll.CommitModelCount = getMapValue(dateRecordAll.ID, AiModelManageMap)
 		dateRecordAll.UserIndex = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight)
-
+		userIndexMap[dateRecordAll.ID] = dateRecordAll.UserIndex
+		if maxUserIndex < dateRecordAll.UserIndex {
+			maxUserIndex = dateRecordAll.UserIndex
+		}
+		if minUserIndex > dateRecordAll.UserIndex {
+			minUserIndex = dateRecordAll.UserIndex
+		}
 		dateRecordBatch = append(dateRecordBatch, dateRecordAll)
 		if len(dateRecordBatch) >= BATCH_INSERT_SIZE {
 			insertTable(dateRecordBatch, tableName, statictisSess)
@@ -523,9 +532,19 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
 		}
 	}
 
+	//normalization
+	for k, v := range userIndexMap {
+		tmpResult := (v - minUserIndex) / (maxUserIndex - minUserIndex)
+		updateUserIndex(tableName, statictisSess, k, tmpResult)
+	}
 	log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount))
 }
 
+func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) {
+	updateSql := "UPDATE public." + tableName + " set user_index=" + fmt.Sprint(userIndex) + " where id=" + fmt.Sprint(userId)
+	statictisSess.Exec(updateSql)
+}
+
 func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, statictisSess *xorm.Session) {
 	insertBatchSql := "INSERT INTO public." + tableName +
diff --git a/modules/labelmsg/redismsgsender.go b/modules/labelmsg/redismsgsender.go
index 8b2eae772..c06407588 100644
--- a/modules/labelmsg/redismsgsender.go
+++ b/modules/labelmsg/redismsgsender.go
@@ -50,6 +50,7 @@ func SendDecompressAttachToLabelOBS(attach string) error {
 	_, err := redisclient.Do("Publish", setting.DecompressOBSTaskName, attach)
 	if err != nil {
 		log.Critical("redis Publish failed.")
+		return err
 	}
 
 	log.Info("LabelDecompressOBSQueue(%s) success", attach)
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index 26f068193..eee539d0c 100755
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -438,6 +438,7 @@ var (
 	//home page
 	RecommentRepoAddr string
 	ESSearchURL string
+	INDEXPOSTFIX string
 	//notice config
 	UserNameOfNoticeRepo string
 	RepoNameOfNoticeRepo string
@@ -1268,6 +1269,7 @@ func NewContext() {
 	sec = Cfg.Section("homepage")
 	RecommentRepoAddr = sec.Key("Address").MustString("https://git.openi.org.cn/OpenIOSSG/promote/raw/branch/master/")
 	ESSearchURL = sec.Key("ESSearchURL").MustString("http://192.168.207.94:9200")
+	INDEXPOSTFIX = sec.Key("INDEXPOSTFIX").MustString("")
 
 	sec = Cfg.Section("notice")
 	UserNameOfNoticeRepo = sec.Key("USER_NAME").MustString("OpenIOSSG")
diff --git a/modules/storage/obs.go b/modules/storage/obs.go
index f733eef6c..08a354359 100755
--- a/modules/storage/obs.go
+++ b/modules/storage/obs.go
@@ -30,6 +30,8 @@ type FileInfo struct {
 }
 
 type FileInfoList []FileInfo
+const MAX_LIST_PARTS = 1000
+
 func (ulist FileInfoList) Swap(i, j int) { ulist[i], ulist[j] = ulist[j], ulist[i] }
 func (ulist FileInfoList) Len() int { return len(ulist) }
 func (ulist FileInfoList) Less(i, j int) bool {
@@ -97,29 +99,48 @@ func CompleteObsMultiPartUpload(uuid, uploadID, fileName string) error {
 	input.Bucket = setting.Bucket
 	input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
 	input.UploadId = uploadID
-	output, err := ObsCli.ListParts(&obs.ListPartsInput{
-		Bucket:   setting.Bucket,
-		Key:      input.Key,
-		UploadId: uploadID,
-	})
-	if err != nil {
-		log.Error("ListParts failed:", err.Error())
-		return err
-	}
-	for _, partInfo := range output.Parts {
-		input.Parts = append(input.Parts, obs.Part{
-			PartNumber: partInfo.PartNumber,
-			ETag:       partInfo.ETag,
+	partNumberMarker := 0
+	for {
+		output, err := ObsCli.ListParts(&obs.ListPartsInput{
+			Bucket:           setting.Bucket,
+			Key:              input.Key,
+			UploadId:         uploadID,
+			MaxParts:         MAX_LIST_PARTS,
+			PartNumberMarker: partNumberMarker,
 		})
+		if err != nil {
+			log.Error("ListParts failed:", err.Error())
+			return err
+		}
+
+		partNumberMarker = output.NextPartNumberMarker
+		log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", uuid, output.MaxParts, output.PartNumberMarker, output.NextPartNumberMarker, len(output.Parts))
+
+		for _, partInfo := range output.Parts {
+			input.Parts = append(input.Parts, obs.Part{
+				PartNumber: partInfo.PartNumber,
+				ETag:       partInfo.ETag,
+			})
+		}
+
+		if len(output.Parts) < output.MaxParts {
+			break
+		} else {
+			continue
+		}
+
+		break
 	}
-	_, err = ObsCli.CompleteMultipartUpload(input)
+	output, err := ObsCli.CompleteMultipartUpload(input)
 	if err != nil {
 		log.Error("CompleteMultipartUpload failed:", err.Error())
 		return err
 	}
+	log.Info("uuid:%s, RequestId:%s", uuid, output.RequestId)
+
 	return nil
 }
diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go
index e24ac95fb..9e4edea03 100755
--- a/routers/api/v1/repo/modelarts.go
+++ b/routers/api/v1/repo/modelarts.go
@@ -74,6 +74,7 @@ func GetModelArtsNotebook2(ctx *context.APIContext) {
 	if job.EndTime == 0 && models.IsModelArtsDebugJobTerminal(job.Status) {
 		job.EndTime = timeutil.TimeStampNow()
 	}
+	job.CorrectCreateUnix()
 	job.ComputeAndSetDuration()
 	err = models.UpdateJob(job)
 	if err != nil {
@@ -160,6 +161,7 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) {
 		}
 
 		if result.JobStatus.State != string(models.JobWaiting) {
+			models.ParseAndSetDurationFromCloudBrainOne(result, job)
 			err = models.UpdateJob(job)
 			if err != nil {
 				log.Error("UpdateJob failed:", err)
@@ -177,14 +179,12 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) {
 		}
 		job.Status = modelarts.TransTrainJobStatus(result.IntStatus)
 		job.Duration = result.Duration / 1000
-		job.TrainJobDuration = result.TrainJobDuration
-		job.TrainJobDuration = models.ConvertDurationToStr(job.Duration)
 
 		if job.EndTime == 0 && models.IsTrainJobTerminal(job.Status) && job.StartTime > 0 {
 			job.EndTime = job.StartTime.Add(job.Duration)
 		}
-
+		job.CorrectCreateUnix()
 		err = models.UpdateTrainJobVersion(job)
 		if err != nil {
 			log.Error("UpdateJob failed:", err)
@@ -417,7 +417,7 @@ func GetModelArtsInferenceJob(ctx *context.APIContext) {
 		if job.EndTime == 0 && models.IsTrainJobTerminal(job.Status) && job.StartTime > 0 {
 			job.EndTime = job.StartTime.Add(job.Duration)
 		}
-
+		job.CorrectCreateUnix()
 		err = models.UpdateInferenceJob(job)
 		if err != nil {
 			log.Error("UpdateJob failed:", err)
diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go
index 96f17b74b..3c66a3537 100755
--- a/routers/repo/attachment.go
+++ b/routers/repo/attachment.go
@@ -78,7 +78,7 @@ func UploadAttachmentUI(ctx *context.Context) {
 }
 
 func EditAttachmentUI(ctx *context.Context) {
-
+	id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
 	ctx.Data["PageIsDataset"] = true
 	attachment, _ := models.GetAttachmentByID(id)
@@ -986,23 +986,29 @@ func HandleUnDecompressAttachment() {
 		if attach.Type == models.TypeCloudBrainOne {
 			err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
 			if err != nil {
-				log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
+				log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
 			} else {
-				attach.DecompressState = models.DecompressStateIng
-				err = models.UpdateAttachment(attach)
-				if err != nil {
-					log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
-				}
+				updateAttachmentDecompressStateIng(attach)
 			}
 		} else if attach.Type == models.TypeCloudBrainTwo {
 			attachjson, _ := json.Marshal(attach)
-			labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
+			err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
+			if err != nil {
+				log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error())
+			} else {
+				updateAttachmentDecompressStateIng(attach)
+			}
 		}
-
 	}
-
 	return
 }
 
+func updateAttachmentDecompressStateIng(attach *models.Attachment) {
+	attach.DecompressState = models.DecompressStateIng
+	err := models.UpdateAttachment(attach)
+	if err != nil {
+		log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
+	}
+}
 func QueryAllPublicDataset(ctx *context.Context) {
 	attachs, err := models.GetAllPublicAttachments()
diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go
index 34a5b7566..9723c34b3 100755
--- a/routers/repo/cloudbrain.go
+++ b/routers/repo/cloudbrain.go
@@ -1386,6 +1386,7 @@ func SyncCloudbrainStatus() {
 				if task.EndTime == 0 && models.IsModelArtsDebugJobTerminal(task.Status) {
 					task.EndTime = timeutil.TimeStampNow()
 				}
+				task.CorrectCreateUnix()
 				task.ComputeAndSetDuration()
 				err = models.UpdateJob(task)
 				if err != nil {
@@ -1412,7 +1413,7 @@ func SyncCloudbrainStatus() {
 				if task.EndTime == 0 && models.IsTrainJobTerminal(task.Status) && task.StartTime > 0 {
 					task.EndTime = task.StartTime.Add(task.Duration)
 				}
-
+				task.CorrectCreateUnix()
 				err = models.UpdateJob(task)
 				if err != nil {
 					log.Error("UpdateJob(%s) failed:%v", task.JobName, err)
@@ -1534,6 +1535,7 @@ func handleNoDurationTask(cloudBrains []*models.Cloudbrain) {
 			task.StartTime = timeutil.TimeStamp(startTime / 1000)
 			task.EndTime = task.StartTime.Add(duration)
 		}
+		task.CorrectCreateUnix()
 		task.ComputeAndSetDuration()
 		err = models.UpdateJob(task)
 		if err != nil {
diff --git a/routers/repo/user_data_analysis.go b/routers/repo/user_data_analysis.go
index 9d906270f..f8a036feb 100755
--- a/routers/repo/user_data_analysis.go
+++ b/routers/repo/user_data_analysis.go
@@ -41,24 +41,24 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
 		"A1": ctx.Tr("user.static.id"),
 		"B1": ctx.Tr("user.static.name"),
 		"C1": ctx.Tr("user.static.codemergecount"),
-		"D1": ctx.Tr("user.static.commitcount"),
-		"E1": ctx.Tr("user.static.issuecount"),
-		"F1": ctx.Tr("user.static.commentcount"),
-		"G1": ctx.Tr("user.static.focusrepocount"),
-		"H1": ctx.Tr("user.static.starrepocount"),
-		"I1": ctx.Tr("user.static.logincount"),
-		"J1": ctx.Tr("user.static.watchedcount"),
-		"K1": ctx.Tr("user.static.commitcodesize"),
-		"L1": ctx.Tr("user.static.solveissuecount"),
-		"M1": ctx.Tr("user.static.encyclopediascount"),
-		"N1": ctx.Tr("user.static.createrepocount"),
-		"O1": ctx.Tr("user.static.openiindex"),
-		"P1": ctx.Tr("user.static.registdate"),
-		"Q1": ctx.Tr("user.static.CloudBrainTaskNum"),
-		"R1": ctx.Tr("user.static.CloudBrainRunTime"),
-		"S1": ctx.Tr("user.static.CommitDatasetNum"),
-		"T1": ctx.Tr("user.static.CommitModelCount"),
-		"U1": ctx.Tr("user.static.UserIndex"),
+		"D1": ctx.Tr("user.static.UserIndex"),
+		"E1": ctx.Tr("user.static.commitcount"),
+		"F1": ctx.Tr("user.static.issuecount"),
+		"G1": ctx.Tr("user.static.commentcount"),
+		"H1": ctx.Tr("user.static.focusrepocount"),
+		"I1": ctx.Tr("user.static.starrepocount"),
+		"J1": ctx.Tr("user.static.logincount"),
+		"K1": ctx.Tr("user.static.watchedcount"),
+		"L1": ctx.Tr("user.static.commitcodesize"),
+		"M1": ctx.Tr("user.static.solveissuecount"),
+		"N1": ctx.Tr("user.static.encyclopediascount"),
+		"O1": ctx.Tr("user.static.createrepocount"),
+		"P1": ctx.Tr("user.static.openiindex"),
+		"Q1": ctx.Tr("user.static.registdate"),
+		"R1": ctx.Tr("user.static.CloudBrainTaskNum"),
+		"S1": ctx.Tr("user.static.CloudBrainRunTime"),
+		"T1": ctx.Tr("user.static.CommitDatasetNum"),
+		"U1": ctx.Tr("user.static.CommitModelCount"),
 		"V1": ctx.Tr("user.static.countdate"),
 	}
 	for k, v := range dataHeader {
@@ -78,28 +78,27 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
 		xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
 		xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
 		xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
-		xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount)
-		xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount)
-		xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount)
-		xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount)
-		xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount)
-		xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount)
-		xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount)
-		xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize)
-		xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount)
-		xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount)
-		xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount)
-		xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
+		xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+		xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
+		xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
+		xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
+		xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount)
+		xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount)
+		xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount)
+		xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount)
+		xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize)
+		xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount)
+		xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount)
+		xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount)
+		xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
 
 		formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
-		xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3])
-
-		xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum)
-		xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
-		xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum)
-		xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount)
-		xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+		xlsx.SetCellValue(sheetName, "Q"+rows, formatTime[0:len(formatTime)-3])
+		xlsx.SetCellValue(sheetName, "R"+rows, userRecord.CloudBrainTaskNum)
+		xlsx.SetCellValue(sheetName, "S"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
+		xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitDatasetNum)
+		xlsx.SetCellValue(sheetName, "U"+rows, userRecord.CommitModelCount)
 
 		formatTime = userRecord.DataDate
 		xlsx.SetCellValue(sheetName, "V"+rows, formatTime)
 	}
@@ -243,24 +242,24 @@ func QueryUserStaticDataPage(ctx *context.Context) {
 		"A1": ctx.Tr("user.static.id"),
 		"B1": ctx.Tr("user.static.name"),
 		"C1": ctx.Tr("user.static.codemergecount"),
-		"D1": ctx.Tr("user.static.commitcount"),
-		"E1": ctx.Tr("user.static.issuecount"),
-		"F1": ctx.Tr("user.static.commentcount"),
-		"G1": ctx.Tr("user.static.focusrepocount"),
-		"H1": ctx.Tr("user.static.starrepocount"),
-		"I1": ctx.Tr("user.static.logincount"),
-		"J1": ctx.Tr("user.static.watchedcount"),
-		"K1": ctx.Tr("user.static.commitcodesize"),
-		"L1": ctx.Tr("user.static.solveissuecount"),
-		"M1": ctx.Tr("user.static.encyclopediascount"),
-		"N1": ctx.Tr("user.static.createrepocount"),
-		"O1": ctx.Tr("user.static.openiindex"),
-		"P1": ctx.Tr("user.static.registdate"),
-		"Q1": ctx.Tr("user.static.CloudBrainTaskNum"),
-		"R1": ctx.Tr("user.static.CloudBrainRunTime"),
-		"S1": ctx.Tr("user.static.CommitDatasetNum"),
-		"T1": ctx.Tr("user.static.CommitModelCount"),
-		"U1": ctx.Tr("user.static.UserIndex"),
+		"D1": ctx.Tr("user.static.UserIndex"),
+		"E1": ctx.Tr("user.static.commitcount"),
+		"F1": ctx.Tr("user.static.issuecount"),
+		"G1": ctx.Tr("user.static.commentcount"),
+		"H1": ctx.Tr("user.static.focusrepocount"),
+		"I1": ctx.Tr("user.static.starrepocount"),
+		"J1": ctx.Tr("user.static.logincount"),
+		"K1": ctx.Tr("user.static.watchedcount"),
+		"L1": ctx.Tr("user.static.commitcodesize"),
+		"M1": ctx.Tr("user.static.solveissuecount"),
+		"N1": ctx.Tr("user.static.encyclopediascount"),
+		"O1": ctx.Tr("user.static.createrepocount"),
+		"P1": ctx.Tr("user.static.openiindex"),
+		"Q1": ctx.Tr("user.static.registdate"),
+		"R1": ctx.Tr("user.static.CloudBrainTaskNum"),
+		"S1": ctx.Tr("user.static.CloudBrainRunTime"),
+		"T1": ctx.Tr("user.static.CommitDatasetNum"),
+		"U1": ctx.Tr("user.static.CommitModelCount"),
 		"V1": ctx.Tr("user.static.countdate"),
 	}
 	for k, v := range dataHeader {
@@ -274,26 +273,26 @@ func QueryUserStaticDataPage(ctx *context.Context) {
 		xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
 		xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
 		xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
-		xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount)
-		xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount)
-		xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount)
-		xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount)
-		xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount)
-		xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount)
-		xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount)
-		xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize)
-		xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount)
-		xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount)
-		xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount)
-		xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
+		xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+		xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
+		xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
+		xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
+		xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount)
+		xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount)
+		xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount)
+		xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount)
+		xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize)
+		xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount)
+		xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount)
+		xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount)
+		xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
 
 		formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
-		xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3])
-		xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum)
-		xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
-		xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum)
-		xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount)
-		xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+		xlsx.SetCellValue(sheetName, "Q"+rows, formatTime[0:len(formatTime)-3])
+		xlsx.SetCellValue(sheetName, "R"+rows, userRecord.CloudBrainTaskNum)
+		xlsx.SetCellValue(sheetName, "S"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
+		xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitDatasetNum)
+		xlsx.SetCellValue(sheetName, "U"+rows, userRecord.CommitModelCount)
 		formatTime = userRecord.DataDate
 		xlsx.SetCellValue(sheetName, "V"+rows, formatTime)
 	}
diff --git a/routers/search.go b/routers/search.go
index c5655b9e1..1cf78666e 100644
--- a/routers/search.go
+++ b/routers/search.go
@@ -68,23 +68,23 @@ func SearchApi(ctx *context.Context) {
 		if OnlySearchLabel {
 			searchRepoByLabel(ctx, Key, Page, PageSize)
 		} else {
-			searchRepo(ctx, "repository-es-index", Key, Page, PageSize, OnlyReturnNum)
+			searchRepo(ctx, "repository-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum)
 		}
 		return
 	} else if TableName == "issue" {
-		searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "f")
+		searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "f")
 		return
 	} else if TableName == "user" {
-		searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, true, OnlyReturnNum)
+		searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, true, OnlyReturnNum)
 		return
 	} else if TableName == "org" {
-		searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, false, OnlyReturnNum)
+		searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, false, OnlyReturnNum)
 		return
 	} else if TableName == "dataset" {
-		searchDataSet(ctx, "dataset-es-index", Key, Page, PageSize, OnlyReturnNum)
+		searchDataSet(ctx, "dataset-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum)
 		return
 	} else if TableName == "pr" {
-		searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "t")
+		searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "t")
 		//searchPR(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum)
 		return
 	}
diff --git a/templates/explore/organizations.tmpl b/templates/explore/organizations.tmpl
index 6d308161d..5faf039af 100644
--- a/templates/explore/organizations.tmpl
+++ b/templates/explore/organizations.tmpl
@@ -91,19 +91,19 @@
 {{if eq $i 0}}
  • - +
  • {{else if eq $i 1}}
  • - +
  • {{else if eq $i 2}}
  • - +
  •
{{else }}
@@ -139,19 +139,19 @@
{{if eq $i 0}}
  • - +
  • {{else if eq $i 1}}
  • - +
  • {{else if eq $i 2}}
  • - +
  •
{{else }}
@@ -187,19 +187,19 @@
{{if eq $i 0}}
  • - +
  • {{else if eq $i 1}}
  • - +
  • {{else if eq $i 2}}
  • - +
  •
{{else }}
diff --git a/templates/org/member/members.tmpl b/templates/org/member/members.tmpl
index 9c45007e5..0f862da7a 100644
--- a/templates/org/member/members.tmpl
+++ b/templates/org/member/members.tmpl
@@ -9,14 +9,14 @@
    {{ range .Members}}
    -
    +
    -
    +
    {{.FullName}}
    -
    +
    {{$.i18n.Tr "org.members.membership_visibility"}}
@@ -31,7 +31,7 @@
{{end}}
    -
    +
    {{$.i18n.Tr "org.members.member_role"}}
    @@ -39,7 +39,7 @@ {{if index $.MembersIsUserOrgOwner .ID}}{{svg "octicon-shield-lock" 16}} {{$.i18n.Tr "org.members.owner"}}{{else}}{{$.i18n.Tr "org.members.member"}}{{end}}
    -
    +
    2FA
    @@ -53,7 +53,7 @@
    -
    +
    {{if eq $.SignedUser.ID .ID}}
diff --git a/templates/org/team/teams.tmpl b/templates/org/team/teams.tmpl
index bc7f5febd..cac7729cc 100644
--- a/templates/org/team/teams.tmpl
+++ b/templates/org/team/teams.tmpl
@@ -9,7 +9,7 @@
    -
    +
    {{range .Teams}}
-
diff --git a/web_src/js/components/DataAnalysis.vue b/web_src/js/components/DataAnalysis.vue
index 7b81e9b9f..186a216c2 100755
--- a/web_src/js/components/DataAnalysis.vue
+++ b/web_src/js/components/DataAnalysis.vue
@@ -31,7 +31,7 @@
-          云脑分析
+          云脑分析(建设中..)
diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue
index 12edccb7c..8c33608e7 100755
--- a/web_src/js/components/MinioUploader.vue
+++ b/web_src/js/components/MinioUploader.vue
@@ -26,7 +26,7 @@ import qs from 'qs';
 import createDropzone from '../features/dropzone.js';
 
 const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config;
-// const uploadtype = 0;
+const chunkSize = 1024 * 1024 * 64;
 
 export default {
   props:{
@@ -137,7 +137,6 @@ export default {
     resetStatus() {
       this.progress = 0;
       this.status = '';
-      console.log(this.uploadtype)
     },
     updateProgress(file, progress) {
       console.log("progress---",progress)
@@ -165,7 +164,6 @@ export default {
        .getElementById('datasetId')
        .getAttribute('datasetId');
      this.resetStatus();
-      console.log(this.file,!this.file?.upload)
      if(!this.file?.upload){
        this.btnFlag = false
        return
@@ -186,7 +184,6 @@ export default {
          File.prototype.slice ||
          File.prototype.mozSlice ||
          File.prototype.webkitSlice,
-        chunkSize = 1024 * 1024 * 64,
        chunks = Math.ceil(file.size / chunkSize),
        spark = new SparkMD5.ArrayBuffer(),
        fileReader = new FileReader();
@@ -327,7 +324,6 @@ export default {
    },

    async newMultiUpload(file) {
-      console.log(this.uploadtype,this)
      const res = await axios.get('/attachments/new_multipart', {
        params: {
          totalChunkCounts: file.totalChunkCounts,
@@ -348,7 +344,6 @@ export default {
          File.prototype.slice ||
          File.prototype.mozSlice ||
          File.prototype.webkitSlice,
-        chunkSize = 1024 * 1024 * 32,
        chunks = Math.ceil(file.size / chunkSize),
        fileReader = new FileReader(),
        time = new Date().getTime();
@@ -457,7 +452,6 @@ export default {
      }

      async function completeUpload() {
-        console.log(_this.uploadtype)
        return await axios.post(
          '/attachments/complete_multipart',
          qs.stringify({
@@ -494,7 +488,6 @@ export default {
          1}/${chunks}个分片上传`
      );
      this.progress = Math.ceil((currentChunk / chunks) * 100);
-      console.log("((currentChunk / chunks) * 100).toFixed(2)",((currentChunk / chunks) * 100).toFixed(2))
      this.updateProgress(file, ((currentChunk / chunks) * 100).toFixed(2));
      this.status = `${this.dropzoneParams.data('uploading')} ${(
        (currentChunk / chunks) *
diff --git a/web_src/js/components/UserAnalysis.vue b/web_src/js/components/UserAnalysis.vue
index 241768c15..d7a8f1f81 100755
--- a/web_src/js/components/UserAnalysis.vue
+++ b/web_src/js/components/UserAnalysis.vue
@@ -67,6 +67,15 @@
          label="PR数"
          align="center">
+
+
+
-
-
-
+