diff --git a/README.md b/README.md index 1d9ab8d06..7c954c2e4 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@

AiForge - 启智AI开发协作平台

-[![release](https://img.shields.io/badge/release-1.21.11.1-blue)](https://git.openi.org.cn/OpenI/aiforge/releases/latest) +[![release](https://img.shields.io/badge/release-1.21.11.1-blue)](https://openi.pcl.ac.cn/OpenI/aiforge/releases/latest) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) @@ -10,7 +10,7 @@ 启智AI开发协作平台是一个在线Web应用,旨在为人工智能算法、模型开发提供在线协同工作环境,它提供了代码托管、数据集管理与共享、免费云端算力资源支持(GPU/NPU)、共享镜像等功能。 -[启智AI开发协作平台](https://git.openi.org.cn) 是使用本项目构建的在线服务,您可以直接点击链接访问试用。 +[启智AI开发协作平台](https://openi.pcl.ac.cn) 是使用本项目构建的在线服务,您可以直接点击链接访问试用。 本项目是基于[Gitea](https://github.com/go-gitea/gitea)发展而来的,我们对其进行了Fork并基于此扩展了人工智能开发中需要的功能,如数据集管理和模型训练等。对于和代码托管相关的功能,您可以参考[Gitea的文档](https://docs.gitea.io/zh-cn/)。 @@ -20,7 +20,7 @@ 后端服务涵盖了AI模型开发流水线,包括代码协同开发、数据管理、模型调试、训练、推理和部署等(*目前尚未支持模型部署*)。在不同的开发阶段,我们还将提供丰富的开发工具供用户使用,如数据标注、数据筛选、模型转换、模型压缩、代码检测等。我们也欢迎社区提供更多丰富的工具接入,提高利用平台进行开发的效率。 ![系统架构图](assets/架构图.png) ## 在线服务使用 -本项目的在线服务平台的详细使用帮助文档,可参阅本项目[百科](https://git.openi.org.cn/OpenI/aiforge/wiki)内容。 +本项目的在线服务平台的详细使用帮助文档,可参阅本项目[百科](https://openi.pcl.ac.cn/OpenI/aiforge/wiki)内容。 - 如何创建账号 - 如何创建组织及管理成员权限 - 如何创建项目仓库 @@ -39,22 +39,22 @@ [从源代码安装说明](https://docs.gitea.io/zh-cn/install-from-source/) ## 授权许可 -本项目采用 MIT 开源授权许可证,完整的授权说明已放置在 [LICENSE](https://git.openi.org.cn/OpenI/aiforge/src/branch/develop/LICENSE) 文件中。 +本项目采用 MIT 开源授权许可证,完整的授权说明已放置在 [LICENSE](https://openi.pcl.ac.cn/OpenI/aiforge/src/branch/develop/LICENSE) 文件中。 ## 需要帮助? 如果您在使用或者开发过程中遇到问题,可以在以下渠道咨询: - - 点击[这里](https://git.openi.org.cn/OpenI/aiforge/issues)在线提交问题(点击页面右上角绿色按钮**创建任务**) + - 点击[这里](https://openi.pcl.ac.cn/OpenI/aiforge/issues)在线提交问题(点击页面右上角绿色按钮**创建任务**) - 加入微信群实时交流,获得进一步的支持 - + ## 启智社区小白训练营: -- 结合案例给大家详细讲解如何使用社区平台,帮助无技术背景的小白成长为启智社区达人 (https://git.openi.org.cn/zeizei/OpenI_Learning) +- 结合案例给大家详细讲解如何使用社区平台,帮助无技术背景的小白成长为启智社区达人 (https://openi.pcl.ac.cn/zeizei/OpenI_Learning) ## 平台引用 如果本平台对您的科研工作提供了帮助,可在论文致谢中加入: -英文版:```Thanks for the support provided by OpenI Community (https://git.openi.org.cn).``` -中文版:```感谢启智社区提供的技术支持(https://git.openi.org.cn)。``` +英文版:```Thanks for the support provided by OpenI Community (https://openi.pcl.ac.cn).``` +中文版:```感谢启智社区提供的技术支持(https://openi.pcl.ac.cn)。``` 如果您的成果中引用了本平台,也欢迎在下述开源项目中提交您的成果信息: -https://git.openi.org.cn/OpenIOSSG/references +https://openi.pcl.ac.cn/OpenIOSSG/references diff --git a/models/ai_model_manage.go b/models/ai_model_manage.go index 0d754b0ba..5b14b9ba2 100644 --- a/models/ai_model_manage.go +++ b/models/ai_model_manage.go @@ -14,6 +14,7 @@ import ( type AiModelManage struct { ID string `xorm:"pk" json:"id"` Name string `xorm:"INDEX NOT NULL" json:"name"` + ModelType int `xorm:"NULL" json:"modelType"` Version string `xorm:"NOT NULL" json:"version"` VersionCount int `xorm:"NOT NULL DEFAULT 0" json:"versionCount"` New int `xorm:"NOT NULL" json:"new"` @@ -287,6 +288,37 @@ func ModifyModelDescription(id string, description string) error { return nil } +func ModifyLocalModel(id string, name, label, description string, engine int) error { + var sess *xorm.Session + sess = x.ID(id) + defer sess.Close() + re, err := sess.Cols("name", "label", "description", "engine").Update(&AiModelManage{ + Description: description, + Name: name, + Label: label, + Engine: int64(engine), + }) + if err != nil { + return err + } + log.Info("success to update description from db.re=" + fmt.Sprint((re))) + return nil +} + +func ModifyModelSize(id string, size int64) error { + var sess *xorm.Session + sess = x.ID(id) + defer sess.Close() + re, err := 
sess.Cols("size").Update(&AiModelManage{ + Size: size, + }) + if err != nil { + return err + } + log.Info("success to update size from db.re=" + fmt.Sprint((re))) + return nil +} + func ModifyModelStatus(id string, modelSize int64, status int, modelPath string, statusDesc string) error { var sess *xorm.Session sess = x.ID(id) diff --git a/models/attachment.go b/models/attachment.go index 2b5fa8efc..2b747db21 100755 --- a/models/attachment.go +++ b/models/attachment.go @@ -134,7 +134,8 @@ func (a *Attachment) S3DownloadURL() string { if a.Type == TypeCloudBrainOne { url, _ = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+AttachmentRelativePath(a.UUID), a.Name) } else if a.Type == TypeCloudBrainTwo { - url, _ = storage.ObsGetPreSignedUrl(a.UUID, a.Name) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(a.UUID[0:1], a.UUID[1:2], a.UUID, a.Name)), "/") + url, _ = storage.ObsGetPreSignedUrl(objectName, a.Name) } return url @@ -550,7 +551,6 @@ func AttachmentsByDatasetOption(datasets []int64, opts *SearchDatasetOptions) ([ ) } - attachments := make([]*Attachment, 0) if err := sess.Table(&Attachment{}).Where(cond).Desc("id"). Find(&attachments); err != nil { diff --git a/models/cloudbrain.go b/models/cloudbrain.go index dacb1b03a..cdd9698fe 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -291,12 +291,30 @@ func (task *Cloudbrain) IsRunning() bool { status == string(JobRunning) || status == GrampusStatusRunning } +func (task *Cloudbrain) IsUserHasRight(user *User) bool { + if user == nil { + return false + } + return user.IsAdmin || user.ID == task.UserID +} + func ConvertDurationToStr(duration int64) string { if duration <= 0 { return DURATION_STR_ZERO } return util.AddZero(duration/3600) + ":" + util.AddZero(duration%3600/60) + ":" + util.AddZero(duration%60) } +func ConvertStrToDuration(trainJobDuration string) int64 { + trainJobDurationList := strings.Split(trainJobDuration, ":") + if len(trainJobDurationList) == 3 { + i, _ := strconv.ParseInt(trainJobDurationList[0], 10, 64) + j, _ := strconv.ParseInt(trainJobDurationList[1], 10, 64) + k, _ := strconv.ParseInt(trainJobDurationList[2], 10, 64) + return i*3600 + j*60 + k + } else { + return 0 + } +} func IsTrainJobTerminal(status string) bool { return status == string(ModelArtsTrainJobCompleted) || status == string(ModelArtsTrainJobFailed) || status == string(ModelArtsTrainJobKilled) || status == GrampusStatusFailed || status == GrampusStatusStopped || status == GrampusStatusSucceeded @@ -1589,9 +1607,23 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { } } if (opts.AiCenter) != "" { - cond = cond.And( - builder.Like{"cloudbrain.ai_center", opts.AiCenter}, - ) + if opts.AiCenter == AICenterOfCloudBrainOne { + cond = cond.And( + builder.Eq{"cloudbrain.type": TypeCloudBrainOne}, + ) + } else if opts.AiCenter == AICenterOfCloudBrainTwo { + cond = cond.And( + builder.Eq{"cloudbrain.type": TypeCloudBrainTwo}, + ) + } else if opts.AiCenter == AICenterOfChengdu { + cond = cond.And( + builder.Eq{"cloudbrain.type": TypeCDCenter}, + ) + } else { + cond = cond.And( + builder.Like{"cloudbrain.ai_center", opts.AiCenter}, + ) + } } if (opts.Cluster) != "" { if opts.Cluster == "resource_cluster_openi" { @@ -1968,7 +2000,7 @@ func UpdateTrainJobVersion(job *Cloudbrain) error { func updateJobTrainVersion(e Engine, job *Cloudbrain) error { var sess *xorm.Session sess = e.Where("job_id = ? 
AND version_name=?", job.JobID, job.VersionName) - _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job) + _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix", "ai_center").Update(job) return err } @@ -2030,10 +2062,17 @@ func GetStoppedJobWithNoStartTimeEndTime() ([]*Cloudbrain, error) { cloudbrains := make([]*Cloudbrain, 0) return cloudbrains, x.SQL("select * from cloudbrain where status in (?,?,?,?,?,?,?) and (start_time is null or end_time is null) limit 100", ModelArtsTrainJobCompleted, ModelArtsTrainJobFailed, ModelArtsTrainJobKilled, ModelArtsStopped, JobStopped, JobFailed, JobSucceeded).Find(&cloudbrains) } +func GetC2NetWithAiCenterWrongJob() ([]*Cloudbrain, error) { + cloudbrains := make([]*Cloudbrain, 0) + return cloudbrains, x. + In("status", ModelArtsTrainJobCompleted, ModelArtsTrainJobFailed, ModelArtsTrainJobKilled, ModelArtsStopped, JobStopped, JobFailed, JobSucceeded). + Where("type = ?", TypeC2Net). + Find(&cloudbrains) +} func GetModelSafetyTestTask() ([]*Cloudbrain, error) { cloudbrains := make([]*Cloudbrain, 0) - sess := x.Where("job_type = ?", string(JobTypeModelSafety)) + sess := x.Where("job_type=?", string(JobTypeModelSafety)) err := sess.Find(&cloudbrains) return cloudbrains, err } @@ -2285,10 +2324,23 @@ func CloudbrainAllStatic(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, er } // sess.OrderBy("cloudbrain.created_unix DESC") cloudbrains := make([]*CloudbrainInfo, 0, setting.UI.IssuePagingNum) - if err := sess.Cols("status", "type", "job_type", "train_job_duration", "duration", "compute_resource", "created_unix", "start_time", "end_time", "work_server_number").Table(&Cloudbrain{}).Unscoped().Where(cond). + if err := sess.Table(&Cloudbrain{}).Unscoped().Where(cond). 
Find(&cloudbrains); err != nil { return nil, 0, fmt.Errorf("Find: %v", err) } + if opts.NeedRepoInfo { + var ids []int64 + for _, task := range cloudbrains { + ids = append(ids, task.RepoID) + } + repositoryMap, err := GetRepositoriesMapByIDs(ids) + if err == nil { + for _, task := range cloudbrains { + task.Repo = repositoryMap[task.RepoID] + } + } + + } return cloudbrains, count, nil } diff --git a/models/cloudbrain_static.go b/models/cloudbrain_static.go index 19e55fb6d..a7678b267 100644 --- a/models/cloudbrain_static.go +++ b/models/cloudbrain_static.go @@ -1,7 +1,6 @@ package models import ( - "fmt" "strconv" "time" @@ -42,14 +41,14 @@ type TaskDetail struct { type CloudbrainDurationStatistic struct { ID int64 `xorm:"pk autoincr"` Cluster string - AiCenterCode string + AiCenterCode string `xorm:"INDEX"` AiCenterName string ComputeResource string - AccCardType string + AccCardType string `xorm:"INDEX"` - DateTime string - DayTime string - HourTime int + DateTime timeutil.TimeStamp `xorm:"INDEX"` + DayTime string `xorm:"INDEX"` + HourTime int `xorm:"INDEX"` CardsUseDuration int CardsTotalDuration int CardsTotalNum int @@ -275,11 +274,14 @@ func GetCloudbrainByTime(beginTime int64, endTime int64) ([]*CloudbrainInfo, err sess := x.NewSession() defer sess.Close() var cond = builder.NewCond() - cond = cond.And( - builder.And(builder.Gte{"cloudbrain.end_time": beginTime}, builder.Lte{"cloudbrain.end_time": endTime}), + cond = cond.Or( + builder.And(builder.Gte{"cloudbrain.end_time": beginTime}, builder.Lte{"cloudbrain.start_time": beginTime}, builder.Gt{"cloudbrain.start_time": 0}), ) cond = cond.Or( - builder.Eq{"cloudbrain.status": string(JobRunning)}, + builder.And(builder.Gte{"cloudbrain.start_time": beginTime}, builder.Lte{"cloudbrain.start_time": endTime}, builder.Gt{"cloudbrain.start_time": 0}), + ) + cond = cond.Or( + builder.And(builder.Eq{"cloudbrain.status": string(JobRunning)}), ) sess.OrderBy("cloudbrain.created_unix ASC") cloudbrains := make([]*CloudbrainInfo, 0, 10) @@ -309,30 +312,20 @@ func InsertCloudbrainDurationStatistic(cloudbrainDurationStatistic *CloudbrainDu return xStatistic.Insert(cloudbrainDurationStatistic) } -func DeleteCloudbrainDurationStatisticHour(date string, hour int, aiCenterCode string, accCardType string) error { - sess := xStatistic.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return fmt.Errorf("Begin: %v", err) - } - - if _, err := sess.Where("day_time = ? AND hour_time = ? AND ai_center_code = ? 
AND acc_card_type = ?", date, hour, aiCenterCode, accCardType).Delete(&CloudbrainDurationStatistic{}); err != nil { - return fmt.Errorf("Delete: %v", err) +func getDurationStatistic(cb *CloudbrainDurationStatistic) (*CloudbrainDurationStatistic, error) { + has, err := x.Get(cb) + if err != nil { + return nil, err + } else if !has { + return nil, ErrJobNotExist{} } - - if err := sess.Commit(); err != nil { - sess.Close() - return fmt.Errorf("Commit: %v", err) - } - - sess.Close() - return nil + return cb, nil } func GetCanUseCardInfo() ([]*ResourceQueue, error) { sess := x.NewSession() defer sess.Close() - sess.OrderBy("resource_queue.id ASC") + sess.OrderBy("resource_queue.cluster DESC, resource_queue.ai_center_code ASC") ResourceQueues := make([]*ResourceQueue, 0, 10) if err := sess.Table(&ResourceQueue{}).Find(&ResourceQueues); err != nil { log.Info("find error.") @@ -346,7 +339,7 @@ func GetCardDurationStatistics(opts *DurationStatisticOptions) ([]*CloudbrainDur var cond = builder.NewCond() if opts.BeginTime.Unix() > 0 && opts.EndTime.Unix() > 0 { cond = cond.And( - builder.And(builder.Gte{"cloudbrain_duration_statistic.created_unix": opts.BeginTime.Unix()}, builder.Lte{"cloudbrain_duration_statistic.created_unix": opts.EndTime.Unix()}), + builder.And(builder.Gte{"cloudbrain_duration_statistic.date_time": opts.BeginTime.Unix()}, builder.Lt{"cloudbrain_duration_statistic.date_time": opts.EndTime.Unix()}), ) } if opts.AiCenterCode != "" { @@ -365,10 +358,31 @@ func GetCardDurationStatistics(opts *DurationStatisticOptions) ([]*CloudbrainDur func GetDurationRecordBeginTime() ([]*CloudbrainDurationStatistic, error) { sess := xStatistic.NewSession() defer sess.Close() - sess.OrderBy("cloudbrain_duration_statistic.id ASC limit 1") + sess.OrderBy("cloudbrain_duration_statistic.date_time ASC limit 1") CloudbrainDurationStatistics := make([]*CloudbrainDurationStatistic, 0) if err := sess.Table(&CloudbrainDurationStatistic{}).Find(&CloudbrainDurationStatistics); err != nil { log.Info("find error.") } return CloudbrainDurationStatistics, nil } + +func GetDurationRecordUpdateTime() ([]*CloudbrainDurationStatistic, error) { + sess := xStatistic.NewSession() + defer sess.Close() + sess.OrderBy("cloudbrain_duration_statistic.date_time DESC limit 1") + CloudbrainDurationStatistics := make([]*CloudbrainDurationStatistic, 0) + if err := sess.Table(&CloudbrainDurationStatistic{}).Find(&CloudbrainDurationStatistics); err != nil { + log.Info("find error.") + } + return CloudbrainDurationStatistics, nil +} + +func DeleteCloudbrainDurationStatistic(beginTime timeutil.TimeStamp, endTime timeutil.TimeStamp) error { + sess := xStatistic.NewSession() + defer sess.Close() + if _, err := sess.Exec("DELETE FROM cloudbrain_duration_statistic WHERE cloudbrain_duration_statistic.date_time BETWEEN ? 
AND ?", beginTime, endTime); err != nil { + log.Info("DELETE cloudbrain_duration_statistic data error.") + return err + } + return nil +} diff --git a/models/file_chunk.go b/models/file_chunk.go index 0fc3a8879..cad7746b7 100755 --- a/models/file_chunk.go +++ b/models/file_chunk.go @@ -28,6 +28,23 @@ type FileChunk struct { UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } +type ModelFileChunk struct { + ID int64 `xorm:"pk autoincr"` + UUID string `xorm:"INDEX"` + Md5 string `xorm:"INDEX"` + ModelUUID string `xorm:"INDEX"` + ObjectName string `xorm:"DEFAULT ''"` + IsUploaded int `xorm:"DEFAULT 0"` // not uploaded: 0, uploaded: 1 + UploadID string `xorm:"UNIQUE"` //minio upload id + TotalChunks int + Size int64 + UserID int64 `xorm:"INDEX"` + Type int `xorm:"INDEX DEFAULT 0"` + CompletedParts []string `xorm:"DEFAULT ''"` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +} + // GetFileChunkByMD5 returns fileChunk by given id func GetFileChunkByMD5(md5 string) (*FileChunk, error) { return getFileChunkByMD5(x, md5) @@ -49,6 +66,21 @@ func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*Fi return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain) } +func GetModelFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int, uuid string) (*ModelFileChunk, error) { + return getModelFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain, uuid) +} + +func getModelFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int, uuid string) (*ModelFileChunk, error) { + fileChunk := new(ModelFileChunk) + + if has, err := e.Where("md5 = ? and user_id = ? and type = ? and model_uuid= ?", md5, userID, typeCloudBrain, uuid).Get(fileChunk); err != nil { + return nil, err + } else if !has { + return nil, ErrFileChunkNotExist{md5, ""} + } + return fileChunk, nil +} + func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) { fileChunk := new(FileChunk) @@ -76,6 +108,21 @@ func getFileChunkByUUID(e Engine, uuid string) (*FileChunk, error) { return fileChunk, nil } +func GetModelFileChunkByUUID(uuid string) (*ModelFileChunk, error) { + return getModelFileChunkByUUID(x, uuid) +} + +func getModelFileChunkByUUID(e Engine, uuid string) (*ModelFileChunk, error) { + fileChunk := new(ModelFileChunk) + + if has, err := e.Where("uuid = ?", uuid).Get(fileChunk); err != nil { + return nil, err + } else if !has { + return nil, ErrFileChunkNotExist{"", uuid} + } + return fileChunk, nil +} + // InsertFileChunk insert a record into file_chunk. func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) { if _, err := x.Insert(fileChunk); err != nil { @@ -85,6 +132,14 @@ func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) { return fileChunk, nil } +// InsertFileChunk insert a record into file_chunk. 
+func InsertModelFileChunk(fileChunk *ModelFileChunk) (_ *ModelFileChunk, err error) { + if _, err := x.Insert(fileChunk); err != nil { + return nil, err + } + return fileChunk, nil +} + func DeleteFileChunkById(uuid string) (*FileChunk, error) { return deleteFileChunkById(x, uuid) } @@ -106,6 +161,17 @@ func deleteFileChunkById(e Engine, uuid string) (*FileChunk, error) { } } +func UpdateModelFileChunk(fileChunk *ModelFileChunk) error { + return updateModelFileChunk(x, fileChunk) +} + +func updateModelFileChunk(e Engine, fileChunk *ModelFileChunk) error { + var sess *xorm.Session + sess = e.Where("uuid = ?", fileChunk.UUID) + _, err := sess.Cols("is_uploaded").Update(fileChunk) + return err +} + // UpdateFileChunk updates the given file_chunk in database func UpdateFileChunk(fileChunk *FileChunk) error { return updateFileChunk(x, fileChunk) @@ -127,3 +193,12 @@ func deleteFileChunk(e Engine, fileChunk *FileChunk) error { _, err := e.ID(fileChunk.ID).Delete(fileChunk) return err } + +func DeleteModelFileChunk(fileChunk *ModelFileChunk) error { + return deleteModelFileChunk(x, fileChunk) +} + +func deleteModelFileChunk(e Engine, fileChunk *ModelFileChunk) error { + _, err := e.ID(fileChunk.ID).Delete(fileChunk) + return err +} diff --git a/models/models.go b/models/models.go index a4ec43f43..6427c576c 100755 --- a/models/models.go +++ b/models/models.go @@ -136,6 +136,7 @@ func init() { new(ImageTopic), new(ImageTopicRelation), new(FileChunk), + new(ModelFileChunk), new(BlockChain), new(RecommendOrg), new(AiModelManage), @@ -185,6 +186,7 @@ func init() { new(UserAnalysisPara), new(Invitation), new(CloudbrainDurationStatistic), + new(UserSummaryCurrentYear), ) gonicNames := []string{"SSL", "UID"} diff --git a/models/resource_queue.go b/models/resource_queue.go index fc0dd8cb5..f54be572e 100644 --- a/models/resource_queue.go +++ b/models/resource_queue.go @@ -143,6 +143,9 @@ func InsertResourceQueue(queue ResourceQueue) (int64, error) { func UpdateResourceQueueById(queueId int64, queue ResourceQueue) (int64, error) { return x.ID(queueId).Update(&queue) } +func UpdateResourceCardsTotalNum(queueId int64, queue ResourceQueue) (int64, error) { + return x.ID(queueId).Cols("cards_total_num", "remark").Update(&queue) +} func SearchResourceQueue(opts SearchResourceQueueOptions) (int64, []ResourceQueue, error) { var cond = builder.NewCond() @@ -313,9 +316,6 @@ func SyncGrampusQueues(updateList []ResourceQueue, insertList []ResourceQueue, e if _, err = sess.In("id", deleteSpcIds).Update(&ResourceSpecification{Status: SpecOffShelf}); err != nil { return err } - if _, err = sess.In("spec_id", deleteSpcIds).Delete(&ResourceSceneSpec{}); err != nil { - return err - } } } diff --git a/models/resource_scene.go b/models/resource_scene.go index 0c921b578..0117535ea 100644 --- a/models/resource_scene.go +++ b/models/resource_scene.go @@ -116,7 +116,7 @@ func InsertResourceScene(r ResourceSceneReq) error { //check specs := make([]ResourceSpecification, 0) - cond := builder.In("id", r.SpecIds).And(builder.Eq{"status": SpecOnShelf}) + cond := builder.In("id", r.SpecIds) if err := sess.Where(cond).Find(&specs); err != nil { return err } @@ -175,7 +175,7 @@ func UpdateResourceScene(r ResourceSceneReq) error { } //check specification specs := make([]ResourceSpecification, 0) - cond := builder.In("id", r.SpecIds).And(builder.Eq{"status": SpecOnShelf}) + cond := builder.In("id", r.SpecIds) if err := sess.Where(cond).Find(&specs); err != nil { return err } diff --git a/models/resource_specification.go 
b/models/resource_specification.go index e04cc6ed9..2f815818b 100644 --- a/models/resource_specification.go +++ b/models/resource_specification.go @@ -12,6 +12,13 @@ const ( SpecOffShelf ) +type SearchSpecOrderBy int + +const ( + SearchSpecOrderById SearchSpecOrderBy = iota + SearchSpecOrder4Standard +) + type ResourceSpecification struct { ID int64 `xorm:"pk autoincr"` QueueId int64 `xorm:"INDEX"` @@ -85,6 +92,7 @@ type SearchResourceSpecificationOptions struct { Status int Cluster string AvailableCode int + OrderBy SearchSpecOrderBy } type SearchResourceBriefSpecificationOptions struct { @@ -168,6 +176,7 @@ type FindSpecsOptions struct { UseShareMemGiB bool //if true,find specs no matter used or not used in scene. if false,only find specs used in scene RequestAll bool + SpecStatus int } type Specification struct { @@ -232,10 +241,18 @@ func SearchResourceSpecification(opts SearchResourceSpecificationOptions) (int64 return 0, nil, err } + var orderby = "" + switch opts.OrderBy { + case SearchSpecOrder4Standard: + orderby = "resource_queue.compute_resource asc,resource_queue.acc_card_type asc,resource_specification.acc_cards_num asc,resource_specification.cpu_cores asc,resource_specification.mem_gi_b asc,resource_specification.share_mem_gi_b asc" + default: + orderby = "resource_specification.id desc" + } + r := make([]ResourceSpecAndQueue, 0) err = x.Where(cond). Join("INNER", "resource_queue", "resource_queue.ID = resource_specification.queue_id"). - Desc("resource_specification.id"). + OrderBy(orderby). Limit(opts.PageSize, (opts.Page-1)*opts.PageSize). Unscoped().Find(&r) if err != nil { @@ -269,10 +286,6 @@ func ResourceSpecOffShelf(id int64) (int64, error) { } sess.Close() }() - //delete scene spec relation - if _, err = sess.Where("spec_id = ?", id).Delete(&ResourceSceneSpec{}); err != nil { - return 0, err - } param := ResourceSpecification{ Status: SpecOffShelf, @@ -326,9 +339,6 @@ func SyncGrampusSpecs(updateList []ResourceSpecification, insertList []ResourceS if _, err = sess.Cols("status", "is_available").In("id", deleteIds).Update(&ResourceSpecification{Status: SpecOffShelf, IsAvailable: false}); err != nil { return err } - if _, err = sess.In("spec_id", deleteIds).Delete(&ResourceSceneSpec{}); err != nil { - return err - } } //update exists specs @@ -393,6 +403,9 @@ func FindSpecs(opts FindSpecsOptions) ([]*Specification, error) { if opts.UseShareMemGiB { cond = cond.And(builder.Eq{"resource_specification.share_mem_gi_b": opts.ShareMemGiB}) } + if opts.SpecStatus > 0 { + cond = cond.And(builder.Eq{"resource_specification.status": opts.SpecStatus}) + } r := make([]*Specification, 0) s := x.Where(cond). 
Join("INNER", "resource_queue", "resource_queue.id = resource_specification.queue_id") diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go index e99927e18..394c24825 100644 --- a/models/user_business_analysis.go +++ b/models/user_business_analysis.go @@ -3,12 +3,15 @@ package models import ( "encoding/json" "fmt" + "io/ioutil" + "net/http" "sort" "strconv" "strings" "time" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "xorm.io/builder" "xorm.io/xorm" @@ -19,185 +22,6 @@ const ( BATCH_INSERT_SIZE = 50 ) -type UserBusinessAnalysisAll struct { - ID int64 `xorm:"pk"` - - CountDate int64 `xorm:"pk"` - - //action :ActionMergePullRequest // 11 - CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCommitRepo // 5 - CommitCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCreateIssue // 10 - IssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //comment table current date - CommentCount int `xorm:"NOT NULL DEFAULT 0"` - - //watch table current date - FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //star table current date - StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //follow table - WatchedCount int `xorm:"NOT NULL DEFAULT 0"` - - // user table - GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` - - // - CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` - - //attachement table - CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` - - //0 - CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` - - //issue, issueassignees - SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //baike - EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` - - //user - RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` - - //repo - CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //login count, from elk - LoginCount int `xorm:"NOT NULL DEFAULT 0"` - - //openi index - OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` - - //user - Email string `xorm:"NOT NULL"` - - //user - Name string `xorm:"NOT NULL"` - - DataDate string `xorm:"NULL"` - - //cloudbraintask - CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` - GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` - GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` - CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` - CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` - UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` - UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` - - UserLocation string `xorm:"NULL"` - - FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` - CollectDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` - RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectImage int `xorm:"NOT NULL DEFAULT 0"` - CollectedImage int `xorm:"NOT NULL DEFAULT 0"` - RecommendImage int `xorm:"NOT NULL DEFAULT 0"` - - Phone string `xorm:"NULL"` - InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` -} - -type UserBusinessAnalysis struct { - ID int64 `xorm:"pk"` - DataDate string `xorm:"pk"` - CountDate int64 `xorm:"NULL"` - - //action :ActionMergePullRequest // 11 - CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCommitRepo // 5 - CommitCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCreateIssue // 6 - IssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //comment table current date - CommentCount int `xorm:"NOT NULL DEFAULT 0"` - - //watch table 
current date - FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //star table current date - StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //follow table - WatchedCount int `xorm:"NOT NULL DEFAULT 0"` - - // user table - GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` - - // - CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` - - //attachement table - CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` - - //0 - CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` - - //issue, issueassignees - SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //baike - EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` - - //user - RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` - - //repo - CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //login count, from elk - LoginCount int `xorm:"NOT NULL DEFAULT 0"` - - //openi index - OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` - - //user - Email string `xorm:"NOT NULL"` - - //user - Name string `xorm:"NOT NULL"` - - CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` - GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` - GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` - CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` - CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` - UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` - UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` - - UserLocation string `xorm:"NULL"` - - FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` - CollectDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` - RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectImage int `xorm:"NOT NULL DEFAULT 0"` - CollectedImage int `xorm:"NOT NULL DEFAULT 0"` - RecommendImage int `xorm:"NOT NULL DEFAULT 0"` - - Phone string `xorm:"NULL"` - InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` -} - type UserBusinessAnalysisQueryOptions struct { ListOptions UserName string @@ -499,7 +323,7 @@ func QueryUserStaticDataForUserDefine(opts *UserBusinessAnalysisQueryOptions, wi DataDate := currentTimeNow.Format("2006-01-02 15:04") CodeMergeCountMap := queryPullRequest(start_unix, end_unix) - CommitCountMap := queryCommitAction(start_unix, end_unix, 5) + CommitCountMap, _ := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) CommentCountMap := queryComment(start_unix, end_unix) @@ -517,16 +341,16 @@ func QueryUserStaticDataForUserDefine(opts *UserBusinessAnalysisQueryOptions, wi CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) } - CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) - CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) + CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) AiModelManageMap := queryUserModel(start_unix, end_unix) CollectDataset, CollectedDataset := queryDatasetStars(start_unix, end_unix) - RecommendDataset := queryRecommedDataSet(start_unix, end_unix) + RecommendDataset, _ := 
queryRecommedDataSet(start_unix, end_unix) CollectImage, CollectedImage := queryImageStars(start_unix, end_unix) RecommendImage := queryRecommedImage(start_unix, end_unix) @@ -752,7 +576,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS startTime := currentTimeNow.AddDate(0, 0, -1) CodeMergeCountMap := queryPullRequest(start_unix, end_unix) - CommitCountMap := queryCommitAction(start_unix, end_unix, 5) + CommitCountMap, mostActiveMap := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) CommentCountMap := queryComment(start_unix, end_unix) @@ -764,13 +588,13 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS log.Info("query commit code errr.") } else { log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) - CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) - log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) + //CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) + //log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) } //CommitCodeSizeMap := queryCommitCodeSize(StartTimeNextDay.Unix(), EndTimeNextDay.Unix()) - CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap, dataSetDownloadMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) - CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) + CreateRepoCountMap, DetailInfoMap, MostDownloadMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix) @@ -778,14 +602,19 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS AiModelManageMap := queryUserModel(start_unix, end_unix) CollectDataset, CollectedDataset := queryDatasetStars(start_unix, end_unix) - RecommendDataset := queryRecommedDataSet(start_unix, end_unix) + RecommendDataset, CreatedDataset := queryRecommedDataSet(start_unix, end_unix) CollectImage, CollectedImage := queryImageStars(start_unix, end_unix) RecommendImage := queryRecommedImage(start_unix, end_unix) InvitationMap := queryUserInvitationCount(start_unix, end_unix) DataDate := currentTimeNow.Format("2006-01-02") + " 00:01" - + bonusMap := make(map[string]map[string]int) + if tableName == "user_business_analysis_current_year" { + bonusMap = getBonusMap() + log.Info("truncate all data from table:user_summary_current_year ") + statictisSess.Exec("TRUNCATE TABLE user_summary_current_year") + } cond := "type != 1 and is_active=true" count, err := sess.Where(cond).Count(new(User)) if err != nil { @@ -883,6 +712,37 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS userMetrics["TotalHasActivityUser"] = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + 1 } } + if tableName == "user_business_analysis_current_year" { + //年度数据 + subTime := time.Now().UTC().Sub(dateRecordAll.RegistDate.AsTime().UTC()) + mostActiveDay := "" + if userInfo, ok := mostActiveMap[dateRecordAll.ID]; ok { + mostActiveDay = getMostActiveJson(userInfo) + } + scoreMap := make(map[string]float64) + repoInfo := getRepoDetailInfo(DetailInfoMap, dateRecordAll.ID, MostDownloadMap) + dataSetInfo, datasetscore := getDataSetInfo(dateRecordAll.ID, CreatedDataset, dataSetDownloadMap, CommitDatasetNumMap, CollectedDataset) + scoreMap["datasetscore"] = datasetscore + 
codeInfo, codescore := getCodeInfo(dateRecordAll) + scoreMap["codescore"] = codescore + cloudBrainInfo := getCloudBrainInfo(dateRecordAll, CloudBrainTaskItemMap, scoreMap) + playARoll := getPlayARoll(bonusMap, dateRecordAll.Name, scoreMap) + re := &UserSummaryCurrentYear{ + ID: dateRecordAll.ID, + Name: dateRecordAll.Name, + Email: dateRecordAll.Email, + Phone: dateRecordAll.Phone, + RegistDate: dateRecordAll.RegistDate, + DateCount: int(subTime.Hours()) / 24, + MostActiveDay: mostActiveDay, + RepoInfo: repoInfo, + DataSetInfo: dataSetInfo, + CodeInfo: codeInfo, + CloudBrainInfo: cloudBrainInfo, + PlayARoll: playARoll, + } + statictisSess.Insert(re) + } } if len(dateRecordBatch) > 0 { err := insertTable(dateRecordBatch, tableName, statictisSess) @@ -890,6 +750,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS if err != nil { log.Info("insert all data failed." + err.Error()) } + } indexTotal += PAGE_SIZE if indexTotal >= count { @@ -911,6 +772,204 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount)) } +func getBonusMap() map[string]map[string]int { + bonusMap := make(map[string]map[string]int) + url := setting.RecommentRepoAddr + "bonus/record.txt" + content, err := GetContentFromPromote(url) + if err == nil { + filenames := strings.Split(content, "\n") + for i := 0; i < len(filenames); i++ { + url = setting.RecommentRepoAddr + "bonus/" + filenames[i] + csvContent, err1 := GetContentFromPromote(url) + if err1 == nil { + //read csv + lines := strings.Split(csvContent, "\n") + for j := 1; j < len(lines); j++ { + aLine := strings.Split(lines[j], ",") + if len(aLine) < 7 { + continue + } + userName := aLine[1] + //email := lines[2] + record, ok := bonusMap[userName] + if !ok { + record = make(map[string]int) + } + record["times"] = getMapKeyStringValue("times", record) + getIntValue(aLine[3]) + record["total_bonus"] = getMapKeyStringValue("total_bonus", record) + getIntValue(aLine[4]) + record["total_cardtime"] = getMapKeyStringValue("total_cardtime", record) + getIntValue(aLine[5]) + record["total_giveup"] = getMapKeyStringValue("total_giveup", record) + getIntValue(aLine[6]) + } + } + } + } + return bonusMap +} + +func getIntValue(val string) int { + i, err := strconv.Atoi(val) + if err == nil { + return i + } + return 0 +} + +func getPlayARoll(bonusMap map[string]map[string]int, userName string, scoreMap map[string]float64) string { + bonusInfo := make(map[string]string) + record, ok := bonusMap[userName] + if ok { + rollscore := 0.0 + bonusInfo["times"] = fmt.Sprint(record["times"]) + if record["times"] >= 4 { + rollscore = float64(record["times"]) / float64(4) + } + scoreMap["rollscore"] = rollscore + bonusInfo["total_bonus"] = fmt.Sprint(record["total_bonus"]) + bonusInfo["total_cardtime"] = fmt.Sprint(record["total_cardtime"]) + bonusInfo["total_giveup"] = fmt.Sprint(record["total_giveup"]) + bonusInfoJson, _ := json.Marshal(bonusInfo) + return string(bonusInfoJson) + } else { + return "" + } +} + +func getCloudBrainInfo(dateRecordAll UserBusinessAnalysisAll, CloudBrainTaskItemMap map[string]int, scoreMap map[string]float64) string { + trainscore := 0.0 + debugscore := 0.0 + runtime := 0.0 + if dateRecordAll.CloudBrainTaskNum > 0 { + cloudBrainInfo := make(map[string]string) + cloudBrainInfo["create_task_num"] = fmt.Sprint(dateRecordAll.CloudBrainTaskNum) + cloudBrainInfo["debug_task_num"] = 
fmt.Sprint(dateRecordAll.GpuDebugJob + dateRecordAll.NpuDebugJob) + if dateRecordAll.GpuDebugJob+dateRecordAll.NpuDebugJob >= 50 { + debugscore = float64(dateRecordAll.GpuDebugJob+dateRecordAll.NpuDebugJob) / float64(50) + } + cloudBrainInfo["train_task_num"] = fmt.Sprint(dateRecordAll.GpuTrainJob + dateRecordAll.NpuTrainJob) + if dateRecordAll.GpuTrainJob+dateRecordAll.NpuTrainJob >= 50 { + trainscore = float64(dateRecordAll.GpuTrainJob+dateRecordAll.NpuTrainJob) / float64(50) + } + cloudBrainInfo["inference_task_num"] = fmt.Sprint(dateRecordAll.NpuInferenceJob + CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_GpuInferenceJob"]) + cloudBrainInfo["card_runtime"] = fmt.Sprint(dateRecordAll.CloudBrainRunTime) + if dateRecordAll.CloudBrainRunTime >= 100 { + runtime = float64(dateRecordAll.CloudBrainRunTime) / float64(100) + } + cloudBrainInfo["card_runtime_money"] = fmt.Sprint(dateRecordAll.CloudBrainRunTime * 5) + cloudBrainInfo["CloudBrainOne"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_CloudBrainOne"]) + cloudBrainInfo["CloudBrainTwo"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_CloudBrainTwo"]) + cloudBrainInfo["C2Net"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_C2Net"]) + + cloudBrainInfoJson, _ := json.Marshal(cloudBrainInfo) + scoreMap["trainscore"] = trainscore + scoreMap["debugscore"] = debugscore + scoreMap["runtime"] = runtime + return string(cloudBrainInfoJson) + } else { + scoreMap["trainscore"] = trainscore + scoreMap["debugscore"] = debugscore + scoreMap["runtime"] = runtime + return "" + } +} + +func getCodeInfo(dateRecordAll UserBusinessAnalysisAll) (string, float64) { + if dateRecordAll.CommitCount > 0 { + codeInfo := make(map[string]string) + codeInfo["commit_count"] = fmt.Sprint(dateRecordAll.CommitCount) + codeInfo["commit_line"] = fmt.Sprint(dateRecordAll.CommitCodeSize) + score := 0.0 + score = float64(dateRecordAll.CommitCodeSize) / float64(dateRecordAll.CommitCount) / float64(20000) + if score < (float64(dateRecordAll.CommitCount) / float64(100)) { + score = float64(dateRecordAll.CommitCount) / float64(100) + } + codeInfo["score"] = fmt.Sprintf("%.2f", score) + + codeInfoJson, _ := json.Marshal(codeInfo) + return string(codeInfoJson), score + } else { + return "", 0 + } +} + +func getDataSetInfo(userId int64, CreatedDataset map[int64]int, dataSetDownloadMap map[int64]int, CommitDatasetNumMap map[int64]int, CollectedDataset map[int64]int) (string, float64) { + datasetInfo := make(map[string]string) + score := 0.0 + if create_count, ok := CreatedDataset[userId]; ok { + datasetInfo["create_count"] = fmt.Sprint(create_count) + score = float64(create_count) / 10 + } + if upload_count, ok := CommitDatasetNumMap[userId]; ok { + datasetInfo["upload_file_count"] = fmt.Sprint(upload_count) + } + if download_count, ok := dataSetDownloadMap[userId]; ok { + datasetInfo["download_count"] = fmt.Sprint(download_count) + } + if cllected_count, ok := CollectedDataset[userId]; ok { + datasetInfo["cllected_count"] = fmt.Sprint(cllected_count) + } + + if len(datasetInfo) > 0 { + datasetInfoJson, _ := json.Marshal(datasetInfo) + return string(datasetInfoJson), score + } else { + return "", score + } +} + +func getRepoDetailInfo(repoDetailInfoMap map[string]int, userId int64, mostDownload map[int64]string) string { + repoDetailInfo := make(map[string]string) + if total, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_total"]; ok { + repoDetailInfo["repo_total"] = fmt.Sprint(total) + } + if private, ok := 
repoDetailInfoMap[fmt.Sprint(userId)+"_is_private"]; ok { + repoDetailInfo["repo_is_private"] = fmt.Sprint(private) + } + if public, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_is_public"]; ok { + repoDetailInfo["repo_is_public"] = fmt.Sprint(public) + } + if download, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_total_download"]; ok { + repoDetailInfo["repo_total_download"] = fmt.Sprint(download) + } + if mostdownload, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_most_download"]; ok { + repoDetailInfo["repo_most_download_count"] = fmt.Sprint(mostdownload) + } + if mostdownloadName, ok := mostDownload[userId]; ok { + repoDetailInfo["repo_most_download_name"] = mostdownloadName + } + if len(repoDetailInfo) > 0 { + repoDetailInfoJson, _ := json.Marshal(repoDetailInfo) + return string(repoDetailInfoJson) + } else { + return "" + } +} + +func getMostActiveJson(userInfo map[string]int) string { + mostActiveMap := make(map[string]string) + if day, ok := userInfo["hour_day"]; ok { + hour := userInfo["hour_hour"] + month := userInfo["hour_month"] + year := userInfo["hour_year"] + delete(userInfo, "hour_day") + delete(userInfo, "hour_hour") + delete(userInfo, "hour_month") + delete(userInfo, "hour_year") + mostActiveMap["before_dawn"] = fmt.Sprint(year) + "/" + fmt.Sprint(month) + "/" + fmt.Sprint(day) + " " + fmt.Sprint(hour) + } + max := 0 + max_day := "" + for key, value := range userInfo { + if value > max { + max = value + max_day = key + } + } + mostActiveMap["most_active_day"] = max_day + mostActiveMap["most_active_num"] = fmt.Sprint(max) + mostActiveMapJson, _ := json.Marshal(mostActiveMap) + return string(mostActiveMapJson) +} + func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) { updateSql := "UPDATE public." 
+ tableName + " set user_index=" + fmt.Sprint(userIndex*100) + " where id=" + fmt.Sprint(userId) statictisSess.Exec(updateSql) @@ -997,7 +1056,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, DataDate := CountDate.Format("2006-01-02") CodeMergeCountMap := queryPullRequest(start_unix, end_unix) - CommitCountMap := queryCommitAction(start_unix, end_unix, 5) + CommitCountMap, _ := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) CommentCountMap := queryComment(start_unix, end_unix) @@ -1010,19 +1069,19 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, log.Info("query commit code errr.") } else { //log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) - CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) - log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) + //CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) + //log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) } - CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) - CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) + CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) AiModelManageMap := queryUserModel(start_unix, end_unix) CollectDataset, CollectedDataset := queryDatasetStars(start_unix, end_unix) - RecommendDataset := queryRecommedDataSet(start_unix, end_unix) + RecommendDataset, _ := queryRecommedDataSet(start_unix, end_unix) CollectImage, CollectedImage := queryImageStars(start_unix, end_unix) RecommendImage := queryRecommedImage(start_unix, end_unix) @@ -1490,41 +1549,65 @@ func queryPullRequest(start_unix int64, end_unix int64) map[int64]int { return resultMap } -func queryCommitAction(start_unix int64, end_unix int64, actionType int64) map[int64]int { +func queryCommitAction(start_unix int64, end_unix int64, actionType int64) (map[int64]int, map[int64]map[string]int) { sess := x.NewSession() defer sess.Close() resultMap := make(map[int64]int) - - cond := "user_id=act_user_id and op_type=" + fmt.Sprint(actionType) + " and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + mostActiveMap := make(map[int64]map[string]int) + cond := "user_id=act_user_id and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Action)) if err != nil { log.Info("query action error. 
return.") - return resultMap + return resultMap, mostActiveMap } + var indexTotal int64 indexTotal = 0 for { - sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + sess.Select("id,user_id,op_type,act_user_id,created_unix").Table("action").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) actionList := make([]*Action, 0) sess.Find(&actionList) log.Info("query action size=" + fmt.Sprint(len(actionList))) for _, actionRecord := range actionList { - if _, ok := resultMap[actionRecord.UserID]; !ok { - resultMap[actionRecord.UserID] = 1 + if int64(actionRecord.OpType) == actionType { + if _, ok := resultMap[actionRecord.UserID]; !ok { + resultMap[actionRecord.UserID] = 1 + } else { + resultMap[actionRecord.UserID] += 1 + } + } + key := getDate(actionRecord.CreatedUnix) + if _, ok := mostActiveMap[actionRecord.UserID]; !ok { + tmpMap := make(map[string]int) + tmpMap[key] = 1 + mostActiveMap[actionRecord.UserID] = tmpMap } else { - resultMap[actionRecord.UserID] += 1 + mostActiveMap[actionRecord.UserID][key] = getMapKeyStringValue(key, mostActiveMap[actionRecord.UserID]) + 1 + } + utcTime := actionRecord.CreatedUnix.AsTime() + hour := utcTime.Hour() + if hour >= 0 && hour <= 5 { + key = "hour_hour" + if getMapKeyStringValue(key, mostActiveMap[actionRecord.UserID]) < hour { + mostActiveMap[actionRecord.UserID][key] = hour + mostActiveMap[actionRecord.UserID]["hour_day"] = utcTime.Day() + mostActiveMap[actionRecord.UserID]["hour_month"] = int(utcTime.Month()) + mostActiveMap[actionRecord.UserID]["hour_year"] = utcTime.Year() + } } } - indexTotal += PAGE_SIZE if indexTotal >= count { break } } - return resultMap + return resultMap, mostActiveMap +} +func getDate(createTime timeutil.TimeStamp) string { + return createTime.Format("2006-01-02") } func queryCreateIssue(start_unix int64, end_unix int64) map[int64]int { @@ -1714,15 +1797,16 @@ func queryFollow(start_unix int64, end_unix int64) (map[int64]int, map[int64]int return resultMap, resultFocusedByOtherMap } -func queryRecommedDataSet(start_unix int64, end_unix int64) map[int64]int { +func queryRecommedDataSet(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() - userIdDdatasetMap := make(map[int64]int) - cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + " and recommend=true" + userIdRecommentDatasetMap := make(map[int64]int) + userIdCreateDatasetMap := make(map[int64]int) + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Dataset)) if err != nil { log.Info("query recommend dataset error. 
return.") - return userIdDdatasetMap + return userIdRecommentDatasetMap, userIdCreateDatasetMap } var indexTotal int64 indexTotal = 0 @@ -1732,18 +1816,21 @@ func queryRecommedDataSet(start_unix int64, end_unix int64) map[int64]int { sess.Find(&datasetList) log.Info("query datasetList size=" + fmt.Sprint(len(datasetList))) for _, datasetRecord := range datasetList { - if _, ok := userIdDdatasetMap[datasetRecord.UserID]; !ok { - userIdDdatasetMap[datasetRecord.UserID] = 1 - } else { - userIdDdatasetMap[datasetRecord.UserID] += 1 + if datasetRecord.Recommend { + if _, ok := userIdRecommentDatasetMap[datasetRecord.UserID]; !ok { + userIdRecommentDatasetMap[datasetRecord.UserID] = 1 + } else { + userIdRecommentDatasetMap[datasetRecord.UserID] += 1 + } } + userIdCreateDatasetMap[datasetRecord.UserID] = getMapValue(datasetRecord.UserID, userIdCreateDatasetMap) + 1 } indexTotal += PAGE_SIZE if indexTotal >= count { break } } - return userIdDdatasetMap + return userIdRecommentDatasetMap, userIdCreateDatasetMap } func queryAllDataSet() (map[int64]int64, map[int64]int64) { @@ -1922,22 +2009,23 @@ func queryImageStars(start_unix int64, end_unix int64) (map[int64]int, map[int64 return imageCollect, imageCollected } -func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { +func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() resultSizeMap := make(map[int64]int) resultNumMap := make(map[int64]int) + resultDownloadMap := make(map[int64]int) cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Attachment)) if err != nil { log.Info("query attachment error. return.") - return resultSizeMap, resultNumMap + return resultSizeMap, resultNumMap, resultDownloadMap } var indexTotal int64 indexTotal = 0 for { - sess.Select("id,uploader_id,size").Table("attachment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + sess.Select("id,uploader_id,size,download_count").Table("attachment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) attachmentList := make([]*Attachment, 0) sess.Find(&attachmentList) @@ -1946,9 +2034,11 @@ func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int6 if _, ok := resultSizeMap[attachRecord.UploaderID]; !ok { resultSizeMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB resultNumMap[attachRecord.UploaderID] = 1 + resultDownloadMap[attachRecord.UploaderID] = int(attachRecord.DownloadCount) } else { resultSizeMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB resultNumMap[attachRecord.UploaderID] += 1 + resultDownloadMap[attachRecord.UploaderID] += int(attachRecord.DownloadCount) } } @@ -1958,32 +2048,50 @@ func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int6 } } - return resultSizeMap, resultNumMap + return resultSizeMap, resultNumMap, resultDownloadMap } -func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { +func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[string]int, map[int64]string) { sess := x.NewSession() defer sess.Close() resultMap := make(map[int64]int) + detailInfoMap := make(map[string]int) + mostDownloadMap := make(map[int64]string) + cond := "is_fork=false and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := 
sess.Where(cond).Count(new(Repository)) if err != nil { log.Info("query Repository error. return.") - return resultMap + return resultMap, detailInfoMap, mostDownloadMap } var indexTotal int64 indexTotal = 0 for { - sess.Select("id,owner_id,name").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + sess.Select("id,owner_id,name,is_private,clone_cnt").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) repoList := make([]*Repository, 0) sess.Find(&repoList) log.Info("query Repository size=" + fmt.Sprint(len(repoList))) for _, repoRecord := range repoList { - if _, ok := resultMap[repoRecord.OwnerID]; !ok { - resultMap[repoRecord.OwnerID] = 1 + resultMap[repoRecord.OwnerID] = getMapValue(repoRecord.OwnerID, resultMap) + 1 + + key := fmt.Sprint(repoRecord.OwnerID) + "_total" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + 1 + + if repoRecord.IsPrivate { + key := fmt.Sprint(repoRecord.OwnerID) + "_is_private" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + 1 } else { - resultMap[repoRecord.OwnerID] += 1 + key := fmt.Sprint(repoRecord.OwnerID) + "_is_public" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + 1 + } + key = fmt.Sprint(repoRecord.OwnerID) + "_total_download" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + int(repoRecord.CloneCnt) + + key = fmt.Sprint(repoRecord.OwnerID) + "_most_download" + if int(repoRecord.CloneCnt) > getMapKeyStringValue(key, detailInfoMap) { + detailInfoMap[key] = int(repoRecord.CloneCnt) + mostDownloadMap[repoRecord.OwnerID] = repoRecord.DisplayName() } } indexTotal += PAGE_SIZE @@ -1992,7 +2100,7 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { } } - return resultMap + return resultMap, detailInfoMap, mostDownloadMap } func queryUserRepoOpenIIndex(start_unix int64, end_unix int64) map[int64]float64 { @@ -2180,6 +2288,7 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s setMapKey("CloudBrainRunTime", cloudTaskRecord.UserID, int(cloudTaskRecord.Duration), resultItemMap) } if cloudTaskRecord.Type == 1 { //npu + setMapKey("CloudBrainTwo", cloudTaskRecord.UserID, 1, resultItemMap) if cloudTaskRecord.JobType == "TRAIN" { setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) } else if cloudTaskRecord.JobType == "INFERENCE" { @@ -2187,14 +2296,32 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s } else { setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) } - } else { //type=0 gpu + } else if cloudTaskRecord.Type == 0 { //type=0 gpu + setMapKey("CloudBrainOne", cloudTaskRecord.UserID, 1, resultItemMap) if cloudTaskRecord.JobType == "TRAIN" { setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "INFERENCE" { + setMapKey("GpuInferenceJob", cloudTaskRecord.UserID, 1, resultItemMap) } else if cloudTaskRecord.JobType == "BENCHMARK" { setMapKey("GpuBenchMarkJob", cloudTaskRecord.UserID, 1, resultItemMap) } else { setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) } + } else if cloudTaskRecord.Type == 2 { + setMapKey("C2Net", cloudTaskRecord.UserID, 1, resultItemMap) + if cloudTaskRecord.ComputeResource == NPUResource { + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } else if 
cloudTaskRecord.ComputeResource == GPUResource { + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } } } indexTotal += PAGE_SIZE @@ -2274,3 +2401,26 @@ func subMonth(t1, t2 time.Time) (month int) { } return month } + +func GetContentFromPromote(url string) (string, error) { + defer func() { + if err := recover(); err != nil { + log.Info("recovered in GetContentFromPromote: %v", err) + return + } + }() + resp, err := http.Get(url) + if err != nil || resp.StatusCode != 200 { + log.Info("get promote content failed. url=" + url) + return "", fmt.Errorf("get promote content failed: %s", url) + } + + bytes, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + log.Info("read promote content error=" + err.Error()) + return "", err + } + allLineStr := string(bytes) + return allLineStr, nil +} diff --git a/models/user_business_struct.go b/models/user_business_struct.go index fe98be760..9dcc12342 100644 --- a/models/user_business_struct.go +++ b/models/user_business_struct.go @@ -2,6 +2,27 @@ package models import "code.gitea.io/gitea/modules/timeutil" +type UserSummaryCurrentYear struct { + ID int64 `xorm:"pk"` + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + Phone string `xorm:"NULL"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + + DateCount int `xorm:"NOT NULL DEFAULT 0"` + MostActiveDay string `xorm:" NULL "` //08.05 + RepoInfo string `xorm:"varchar(1000)"` //created XX repos, XX public and XX private, downloaded XXX times in total; the most downloaded repo got XXX downloads + DataSetInfo string `xorm:"varchar(500)"` //created XX datasets, uploaded XX dataset files, downloaded XX times in total, favorited XX times + CodeInfo string `xorm:"varchar(500)"` //commit count, total committed lines of code, latest commit time + CloudBrainInfo string `xorm:"varchar(1000)"` //created XX cloudbrain tasks: XX debug, XX train, XX inference; consumed XXXX card-hours in total, saving xxxxx yuan + //the free compute came XX% from Pengcheng Cloudbrain I, XX% from Pengcheng Cloudbrain II, and XX% from the intelligent computing network (C2Net) + PlayARoll string `xorm:"varchar(500)"` //joined the "open source ranking" bonus campaign XX times, made the leaderboard XX times, and earned XXX yuan of community incentives in total + + Label string `xorm:"varchar(500)"` +} + type UserBusinessAnalysisCurrentYear struct { ID int64 `xorm:"pk"` CountDate int64 `xorm:"pk"` @@ -505,3 +526,182 @@ type UserMetrics struct { ActivityUserJson string `xorm:"text NULL"` //激活用户列表 CurrentDayRegistUser int `xorm:"NOT NULL DEFAULT 0"` //当天注册用户 } + +type UserBusinessAnalysisAll struct { + ID int64 `xorm:"pk"` + + CountDate int64 `xorm:"pk"` + + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + + //action :ActionCommitRepo // 5 + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + + //action :ActionCreateIssue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + + //attachment table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + + //encyclopedia + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + + //openi 
index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + + //user + Email string `xorm:"NOT NULL"` + + //user + Name string `xorm:"NOT NULL"` + + DataDate string `xorm:"NULL"` + + //cloudbraintask + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` + + UserLocation string `xorm:"NULL"` + + FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` + CollectDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` + RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectImage int `xorm:"NOT NULL DEFAULT 0"` + CollectedImage int `xorm:"NOT NULL DEFAULT 0"` + RecommendImage int `xorm:"NOT NULL DEFAULT 0"` + + Phone string `xorm:"NULL"` + InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` +} + +type UserBusinessAnalysis struct { + ID int64 `xorm:"pk"` + DataDate string `xorm:"pk"` + CountDate int64 `xorm:"NULL"` + + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + + //action :ActionCommitRepo // 5 + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + + //action :ActionCreateIssue // 6 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + + //user + Email string `xorm:"NOT NULL"` + + //user + Name string `xorm:"NOT NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` + + UserLocation string `xorm:"NULL"` + + FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` + CollectDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` + RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectImage int `xorm:"NOT NULL DEFAULT 0"` + CollectedImage int `xorm:"NOT NULL DEFAULT 0"` + RecommendImage int 
`xorm:"NOT NULL DEFAULT 0"`
+
+	Phone             string `xorm:"NULL"`
+	InvitationUserNum int    `xorm:"NOT NULL DEFAULT 0"`
+}
diff --git a/modules/grampus/resty.go b/modules/grampus/resty.go
index f36721c85..a9e1aed5c 100755
--- a/modules/grampus/resty.go
+++ b/modules/grampus/resty.go
@@ -245,6 +245,32 @@ func GetTrainJobLog(jobID string) (string, error) {
 	return logContent, nil
 }

+func GetGrampusMetrics(jobID string) (models.GetTrainJobMetricStatisticResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.GetTrainJobMetricStatisticResult
+	res, err := client.R().
+		SetAuthToken(TOKEN).
+		Get(HOST + urlTrainJob + "/" + jobID + "/task/0/replica/0/metrics")
+
+	if err != nil {
+		return result, fmt.Errorf("resty GetGrampusMetrics: %v", err)
+	}
+	if err = json.Unmarshal([]byte(res.String()), &result); err != nil {
+		log.Error("GetGrampusMetrics json.Unmarshal failed(%s): %v", res.String(), err.Error())
+		return result, fmt.Errorf("json.Unmarshal failed(%s): %v", res.String(), err.Error())
+	}
+	if res.StatusCode() != http.StatusOK {
+		log.Error("Call GrampusMetrics failed(%d):%s(%s)", res.StatusCode(), result.ErrorCode, result.ErrorMsg)
+		return result, fmt.Errorf("Call GrampusMetrics failed(%d):%s(%s)", res.StatusCode(), result.ErrorCode, result.ErrorMsg)
+	}
+	if !result.IsSuccess {
+		log.Error("GetGrampusMetrics(%s) failed", jobID)
+		return result, fmt.Errorf("GetGrampusMetrics failed:%s", result.ErrorMsg)
+	}
+	return result, nil
+}
+
 func StopJob(jobID string) (*models.GrampusStopJobResponse, error) {
 	checkSetting()
 	client := getRestyClient()
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index 90b86eb4a..374e6a99d 100755
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -598,20 +598,23 @@ var (

 	//grampus config
 	Grampus = struct {
-		Env               string
-		Host              string
-		UserName          string
-		Password          string
-		SpecialPools      string
-		C2NetSequence     string
-		SyncScriptProject string
-		LocalCenterID     string
-		AiCenterInfo      string
+		Env                     string
+		Host                    string
+		UserName                string
+		Password                string
+		SpecialPools            string
+		C2NetSequence           string
+		SyncScriptProject       string
+		LocalCenterID           string
+		AiCenterInfo            string
+		AiCenterCodeAndNameInfo string
+		UsageRateBeginTime      string
 	}{}
-	C2NetInfos   *C2NetSqInfos
-	CenterInfos  *AiCenterInfos
-	C2NetMapInfo map[string]*C2NetSequenceInfo
+	C2NetInfos                 *C2NetSqInfos
+	CenterInfos                *AiCenterInfos
+	C2NetMapInfo               map[string]*C2NetSequenceInfo
+	AiCenterCodeAndNameMapInfo map[string]*C2NetSequenceInfo

 	//elk config
 	ElkUrl string
@@ -1466,7 +1469,7 @@ func NewContext() {
 		MaxDuration = sec.Key("MAX_DURATION").MustInt64(14400)
 		TrainGpuTypes = sec.Key("TRAIN_GPU_TYPES").MustString("")
 		TrainResourceSpecs = sec.Key("TRAIN_RESOURCE_SPECS").MustString("")
-		MaxModelSize = sec.Key("MAX_MODEL_SIZE").MustFloat64(500)
+		MaxModelSize = sec.Key("MAX_MODEL_SIZE").MustFloat64(200)
 		InferenceGpuTypes = sec.Key("INFERENCE_GPU_TYPES").MustString("")
 		InferenceResourceSpecs = sec.Key("INFERENCE_RESOURCE_SPECS").MustString("")
 		SpecialPools = sec.Key("SPECIAL_POOL").MustString("")
@@ -1680,6 +1683,8 @@ func getGrampusConfig() {
 	Grampus.Password = sec.Key("PASSWORD").MustString("")
 	Grampus.SpecialPools = sec.Key("SPECIAL_POOL").MustString("")
 	Grampus.C2NetSequence = sec.Key("C2NET_SEQUENCE").MustString("{\"sequence\":[{\"id\":1,\"name\":\"cloudbrain_one\",\"content\":\"鹏城云脑一号\",\"content_en\":\"Pengcheng Cloudbrain Ⅰ\"},{\"id\":2,\"name\":\"cloudbrain_two\",\"content\":\"鹏城云脑二号\",\"content_en\":\"Pengcheng Cloudbrain 
Ⅱ\"},{\"id\":3,\"name\":\"beida\",\"content\":\"北大人工智能集群系统\",\"content_en\":\"Peking University AI Center\"},{\"id\":4,\"name\":\"hefei\",\"content\":\"合肥类脑智能开放平台\",\"content_en\":\"Hefei AI Center\"},{\"id\":5,\"name\":\"wuhan\",\"content\":\"武汉人工智能计算中心\",\"content_en\":\"Wuhan AI Center\"},{\"id\":6,\"name\":\"xian\",\"content\":\"西安未来人工智能计算中心\",\"content_en\":\"Xi'an AI Center\"},{\"id\":7,\"pclcci\":\"more\",\"content\":\"鹏城云计算所\",\"content_en\":\"Pengcheng Cloud Computing Institute\"},{\"id\":8,\"name\":\"xuchang\",\"content\":\"中原人工智能计算中心\",\"content_en\":\"Zhongyuan AI Center\"},{\"id\":9,\"name\":\"chengdu\",\"content\":\"成都人工智能计算中心\",\"content_en\":\"Chengdu AI Center\"},{\"id\":10,\"name\":\"more\",\"content\":\"横琴先进智能计算中心\",\"content_en\":\"Hengqin AI Center\"},{\"id\":11,\"name\":\"more\",\"content\":\"国家超级计算济南中心\",\"content_en\":\"HPC & AI Center\"}]}")
+	Grampus.AiCenterCodeAndNameInfo = sec.Key("AI_CENTER_CODE_AND_NAME").MustString("{\"sequence\":[{\"id\":1,\"name\":\"cloudbrain_one\",\"content\":\"鹏城云脑一号\",\"content_en\":\"Pengcheng Cloudbrain Ⅰ\"},{\"id\":2,\"name\":\"cloudbrain_two\",\"content\":\"鹏城云脑二号\",\"content_en\":\"Pengcheng Cloudbrain Ⅱ\"},{\"id\":3,\"name\":\"beida\",\"content\":\"北大人工智能集群系统\",\"content_en\":\"Peking University AI Center\"},{\"id\":4,\"name\":\"hefei\",\"content\":\"合肥类脑智能开放平台\",\"content_en\":\"Hefei AI Center\"},{\"id\":5,\"name\":\"wuhan\",\"content\":\"武汉人工智能计算中心\",\"content_en\":\"Wuhan AI Center\"},{\"id\":6,\"name\":\"xian\",\"content\":\"西安未来人工智能计算中心\",\"content_en\":\"Xi'an AI Center\"},{\"id\":7,\"pclcci\":\"more\",\"content\":\"鹏城云计算所\",\"content_en\":\"Pengcheng Cloud Computing Institute\"},{\"id\":8,\"name\":\"xuchang\",\"content\":\"中原人工智能计算中心\",\"content_en\":\"Zhongyuan AI Center\"},{\"id\":9,\"name\":\"chengdu\",\"content\":\"成都人工智能计算中心\",\"content_en\":\"Chengdu AI Center\"},{\"id\":10,\"name\":\"more\",\"content\":\"横琴先进智能计算中心\",\"content_en\":\"Hengqin AI Center\"},{\"id\":11,\"name\":\"more\",\"content\":\"国家超级计算济南中心\",\"content_en\":\"HPC & AI Center\"}]}")
+	Grampus.UsageRateBeginTime = sec.Key("USAGE_RATE_BEGIN_TIME").MustString("2021-01-01 00:00:00")
 	if Grampus.C2NetSequence != "" {
 		if err := json.Unmarshal([]byte(Grampus.C2NetSequence), &C2NetInfos); err != nil {
 			log.Error("Unmarshal(C2NetSequence) failed:%v", err)
@@ -1689,6 +1694,15 @@ func getGrampusConfig() {
 			C2NetMapInfo[value.Name] = value
 		}
 	}
+	if Grampus.AiCenterCodeAndNameInfo != "" {
+		var aiCenterNameInfos *C2NetSqInfos
+		if err := json.Unmarshal([]byte(Grampus.AiCenterCodeAndNameInfo), &aiCenterNameInfos); err != nil {
+			log.Error("Unmarshal(AiCenterCodeAndNameInfo) failed:%v", err)
+		} else if aiCenterNameInfos != nil {
+			AiCenterCodeAndNameMapInfo = make(map[string]*C2NetSequenceInfo)
+			for _, value := range aiCenterNameInfos.C2NetSqInfo {
+				AiCenterCodeAndNameMapInfo[value.Name] = value
+			}
+		}
+	}
 	Grampus.SyncScriptProject = sec.Key("SYNC_SCRIPT_PROJECT").MustString("script_for_grampus")
 	Grampus.LocalCenterID = sec.Key("LOCAL_CENTER_ID").MustString("cloudbrain2")
 	Grampus.AiCenterInfo = sec.Key("AI_CENTER_INFO").MustString("")
diff --git a/modules/storage/minio.go b/modules/storage/minio.go
index 47f70e12d..a1a6e131a 100755
--- a/modules/storage/minio.go
+++ b/modules/storage/minio.go
@@ -144,8 +144,8 @@ func (m *MinioStorage) HasObject(path string) (bool, error) {
 	// Indicate to our routine to exit cleanly upon return. 
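+	// NOTE: HasObject now expects `path` to already be the full bucket-relative
+	// object key (any base path applied by the caller), so the listing below no
+	// longer rebuilds the key via m.buildMinioPath.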
defer close(doneCh)
-
-	objectCh := m.client.ListObjects(m.bucket, m.buildMinioPath(path), false, doneCh)
+	objectCh := m.client.ListObjects(m.bucket, path, false, doneCh)
 	for object := range objectCh {
 		if object.Err != nil {
 			return hasObject, object.Err
diff --git a/modules/storage/minio_ext.go b/modules/storage/minio_ext.go
index 4b738c068..d4a8abba5 100755
--- a/modules/storage/minio_ext.go
+++ b/modules/storage/minio_ext.go
@@ -3,7 +3,6 @@ package storage
 import (
 	"encoding/xml"
 	"errors"
-	"path"
 	"sort"
 	"strconv"
 	"strings"
@@ -101,7 +100,7 @@ func getClients() (*minio_ext.Client, *miniov6.Core, error) {
 	return client, core, nil
 }

-func GenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) {
+func GenMultiPartSignedUrl(objectName string, uploadId string, partNumber int, partSize int64) (string, error) {
 	minioClient, _, err := getClients()
 	if err != nil {
 		log.Error("getClients failed:", err.Error())
@@ -110,7 +109,7 @@ func GenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSiz
 	minio := setting.Attachment.Minio
 	bucketName := minio.Bucket
-	objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")

 	return minioClient.GenUploadPartSignedUrl(uploadId, bucketName, objectName, partNumber, partSize, PresignedUploadPartUrlExpireTime, setting.Attachment.Minio.Location)
 }
@@ -268,6 +267,23 @@ func MinioCopyFiles(bucketName string, srcPath string, destPath string, Files []
 	return fileTotalSize, nil
 }

+func MinioCopyAFile(srcBucketName, srcObjectName, destBucketName, destObjectName string) (int64, error) {
+	var fileTotalSize int64
+	_, core, err := getClients()
+	if err != nil {
+		log.Error("getClients failed:", err.Error())
+		return fileTotalSize, err
+	}
+	meta, err := core.StatObject(srcBucketName, srcObjectName, miniov6.StatObjectOptions{})
+	if err != nil {
+		log.Error("StatObject failed:%v", err)
+		return fileTotalSize, err
+	}
+	if _, err := core.CopyObject(srcBucketName, srcObjectName, destBucketName, destObjectName, meta.UserMetadata); err != nil {
+		log.Error("CopyObject failed:%v", err)
+		return fileTotalSize, err
+	}
+	fileTotalSize = meta.Size
+	return fileTotalSize, nil
+}
+
 func MinioPathCopy(bucketName string, srcPath string, destPath string) (int64, error) {
 	_, core, err := getClients()
 	var fileTotalSize int64
@@ -301,7 +317,7 @@ func MinioPathCopy(bucketName string, srcPath string, destPath string) (int64, e
 	return fileTotalSize, nil
 }

-func NewMultiPartUpload(uuid string) (string, error) {
+func NewMultiPartUpload(objectName string) (string, error) {
 	_, core, err := getClients()
 	if err != nil {
 		log.Error("getClients failed:", err.Error())
@@ -310,12 +326,12 @@ func NewMultiPartUpload(uuid string) (string, error) {
 	minio := setting.Attachment.Minio
 	bucketName := minio.Bucket
-	objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")

 	return core.NewMultipartUpload(bucketName, objectName, miniov6.PutObjectOptions{})
 }

-func CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (string, error) {
+func CompleteMultiPartUpload(objectName string, uploadID string, totalChunks int) (string, error) {
 	client, core, err := getClients()
 	if err != nil {
 		log.Error("getClients failed:", err.Error())
@@ -324,8 +340,8 @@ func 
CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (str minio := setting.Attachment.Minio bucketName := minio.Bucket - objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") - + //objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + log.Info("bucketName=" + bucketName + " objectName=" + objectName + " uploadID=" + uploadID) partInfos, err := client.ListObjectParts(bucketName, objectName, uploadID) if err != nil { log.Error("ListObjectParts failed:", err.Error()) @@ -351,7 +367,7 @@ func CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (str return core.CompleteMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload.Parts) } -func GetPartInfos(uuid string, uploadID string) (string, error) { +func GetPartInfos(objectName string, uploadID string) (string, error) { minioClient, _, err := getClients() if err != nil { log.Error("getClients failed:", err.Error()) @@ -360,7 +376,7 @@ func GetPartInfos(uuid string, uploadID string) (string, error) { minio := setting.Attachment.Minio bucketName := minio.Bucket - objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + //objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") partInfos, err := minioClient.ListObjectParts(bucketName, objectName, uploadID) if err != nil { diff --git a/modules/storage/obs.go b/modules/storage/obs.go index 57ef63029..83b03ed44 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -90,17 +90,16 @@ func listAllParts(uuid, uploadID, key string) (output *obs.ListPartsOutput, err } else { continue } - - break } return output, nil } -func GetObsPartInfos(uuid, uploadID, fileName string) (string, error) { - key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") +func GetObsPartInfos(objectName, uploadID string) (string, error) { + key := objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") - allParts, err := listAllParts(uuid, uploadID, key) + allParts, err := listAllParts(objectName, uploadID, key) if err != nil { log.Error("listAllParts failed: %v", err) return "", err @@ -114,10 +113,11 @@ func GetObsPartInfos(uuid, uploadID, fileName string) (string, error) { return chunks, nil } -func NewObsMultiPartUpload(uuid, fileName string) (string, error) { +func NewObsMultiPartUpload(objectName string) (string, error) { input := &obs.InitiateMultipartUploadInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") output, err := ObsCli.InitiateMultipartUpload(input) if err != nil { @@ -128,13 +128,14 @@ func NewObsMultiPartUpload(uuid, fileName string) (string, error) { return output.UploadId, nil } -func CompleteObsMultiPartUpload(uuid, uploadID, fileName string, totalChunks int) error { +func CompleteObsMultiPartUpload(objectName, uploadID string, totalChunks int) error { input := &obs.CompleteMultipartUploadInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + //input.Key = strings.TrimPrefix(path.Join(setting.BasePath, 
path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName input.UploadId = uploadID - allParts, err := listAllParts(uuid, uploadID, input.Key) + allParts, err := listAllParts(objectName, uploadID, input.Key) if err != nil { log.Error("listAllParts failed: %v", err) return err @@ -153,15 +154,16 @@ func CompleteObsMultiPartUpload(uuid, uploadID, fileName string, totalChunks int return err } - log.Info("uuid:%s, RequestId:%s", uuid, output.RequestId) + log.Info("uuid:%s, RequestId:%s", objectName, output.RequestId) return nil } -func ObsMultiPartUpload(uuid string, uploadId string, partNumber int, fileName string, putBody io.ReadCloser) error { +func ObsMultiPartUpload(objectName string, uploadId string, partNumber int, fileName string, putBody io.ReadCloser) error { input := &obs.UploadPartInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") input.UploadId = uploadId input.PartNumber = partNumber input.Body = putBody @@ -241,11 +243,6 @@ func ObsDownloadAFile(bucket string, key string) (io.ReadCloser, error) { } } -func ObsDownload(uuid string, fileName string) (io.ReadCloser, error) { - - return ObsDownloadAFile(setting.Bucket, strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")) -} - func ObsModelDownload(JobName string, fileName string) (io.ReadCloser, error) { input := &obs.GetObjectInput{} input.Bucket = setting.Bucket @@ -297,7 +294,7 @@ func ObsCopyManyFile(srcBucket string, srcPath string, destBucket string, destPa log.Info("Get File error, error=" + err.Error()) continue } - obsCopyFile(srcBucket, srcKey, destBucket, destKey) + ObsCopyFile(srcBucket, srcKey, destBucket, destKey) fileTotalSize += out.ContentLength } @@ -321,7 +318,7 @@ func ObsCopyAllFile(srcBucket string, srcPath string, destBucket string, destPat index++ for _, val := range output.Contents { destKey := destPath + val.Key[length:] - obsCopyFile(srcBucket, val.Key, destBucket, destKey) + ObsCopyFile(srcBucket, val.Key, destBucket, destKey) fileTotalSize += val.Size } if output.IsTruncated { @@ -340,7 +337,7 @@ func ObsCopyAllFile(srcBucket string, srcPath string, destBucket string, destPat return fileTotalSize, nil } -func obsCopyFile(srcBucket string, srcKeyName string, destBucket string, destKeyName string) error { +func ObsCopyFile(srcBucket string, srcKeyName string, destBucket string, destKeyName string) error { input := &obs.CopyObjectInput{} input.Bucket = destBucket input.Key = destKeyName @@ -529,11 +526,12 @@ func GetObsListObject(jobName, outPutPath, parentDir, versionName string) ([]Fil } } -func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, fileName string) (string, error) { +func ObsGenMultiPartSignedUrl(objectName string, uploadId string, partNumber int) (string, error) { input := &obs.CreateSignedUrlInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") input.Expires = 60 * 60 input.Method = obs.HttpMethodPut @@ -581,10 +579,11 @@ func GetObsCreateSignedUrl(jobName, parentDir, fileName string) (string, error) return 
GetObsCreateSignedUrlByBucketAndKey(setting.Bucket, strings.TrimPrefix(path.Join(setting.TrainJobModelPath, jobName, setting.OutPutPath, parentDir, fileName), "/"))
 }

-func ObsGetPreSignedUrl(uuid, fileName string) (string, error) {
+func ObsGetPreSignedUrl(objectName, fileName string) (string, error) {
 	input := &obs.CreateSignedUrlInput{}
 	input.Method = obs.HttpMethodGet
-	input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
+	input.Key = objectName
 	input.Bucket = setting.Bucket
 	input.Expires = 60 * 60
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index 66ee9b64f..e0f0719f8 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -1273,12 +1273,14 @@ model.manage.model_accuracy = Model Accuracy
 model.convert=Model Transformation
 model.list=Model List
 model.manage.create_new_convert_task=Create Model Transformation Task
-
+model.manage.import_local_model=Import Local Model
+model.manage.import_online_model=Import Online Model
 model.manage.notcreatemodel=No model has been created
 model.manage.init1=Code version: You have not initialized the code repository, please
 model.manage.init2=initialized first ;
 model.manage.createtrainjob_tip=Training task: you haven't created a training task, please create it first
-model.manage.createtrainjob=Training task.
+model.manage.createmodel_tip=You can import a local model or an online model. To import an online model, please first
+model.manage.createtrainjob=Create training task.
 model.manage.delete=Delete Model
 model.manage.delete_confirm=Are you sure to delete this model? Once this model is deleted, it cannot be restored. 
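; NOTE: createmodel_tip appears to be rendered immediately before the
; createtrainjob link text (mirroring the createtrainjob_tip pattern above),
; so the two values must read as one continuous sentence.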
model.manage.select.trainjob=Select train task diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index da82a2316..71672c838 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -1287,12 +1287,14 @@ model.manage.model_accuracy = 模型精度 model.convert=模型转换任务 model.list=模型列表 model.manage.create_new_convert_task=创建模型转换任务 - +model.manage.import_local_model=导入本地模型 +model.manage.import_online_model=导入线上模型 model.manage.notcreatemodel=未创建过模型 model.manage.init1=代码版本:您还没有初始化代码仓库,请先 model.manage.init2=创建代码版本; model.manage.createtrainjob_tip=训练任务:您还没创建过训练任务,请先创建 -model.manage.createtrainjob=训练任务。 +model.manage.createmodel_tip=您可以导入本地模型或者导入线上模型。导入线上模型需先 +model.manage.createtrainjob=创建训练任务。 model.manage.delete=删除模型 model.manage.delete_confirm=你确认删除该模型么?此模型一旦删除不可恢复。 model.manage.select.trainjob=选择训练任务 diff --git a/routers/admin/cloudbrains.go b/routers/admin/cloudbrains.go index cbf6782ed..91685251b 100755 --- a/routers/admin/cloudbrains.go +++ b/routers/admin/cloudbrains.go @@ -17,6 +17,7 @@ import ( "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" + cloudbrainService "code.gitea.io/gitea/services/cloudbrain" ) const ( @@ -95,6 +96,8 @@ func CloudBrains(ctx *context.Context) { models.LoadSpecs4CloudbrainInfo(ciTasks) for i, task := range ciTasks { + ciTasks[i] = cloudbrainService.UpdateCloudbrainAiCenter(ciTasks[i]) + ciTasks[i].Cloudbrain.AiCenter = repo.GetAiCenterNameByCode(ciTasks[i].Cloudbrain.AiCenter, ctx.Language()) ciTasks[i].CanDebug = true ciTasks[i].CanDel = true ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource @@ -186,7 +189,8 @@ func DownloadCloudBrains(ctx *context.Context) { } models.LoadSpecs4CloudbrainInfo(pageRecords) for _, record := range pageRecords { - + record = cloudbrainService.UpdateCloudbrainAiCenter(record) + record.Cloudbrain.AiCenter = repo.GetAiCenterNameByCode(record.Cloudbrain.AiCenter, ctx.Language()) for k, v := range allValues(row, record, ctx) { f.SetCellValue(cloudBrain, k, v) } @@ -208,7 +212,7 @@ func allValues(row int, rs *models.CloudbrainInfo, ctx *context.Context) map[str return map[string]string{getCellName("A", row): rs.DisplayJobName, getCellName("B", row): repo.GetCloudbrainCluster(rs.Cloudbrain, ctx), getCellName("C", row): rs.JobType, getCellName("D", row): rs.Status, getCellName("E", row): time.Unix(int64(rs.Cloudbrain.CreatedUnix), 0).Format(CREATE_TIME_FORMAT), getCellName("F", row): getDurationTime(rs), getCellName("G", row): rs.ComputeResource, - getCellName("H", row): repo.GetCloudbrainAiCenter(rs.Cloudbrain, ctx), getCellName("I", row): getCloudbrainCardType(rs), + getCellName("H", row): rs.Cloudbrain.AiCenter, getCellName("I", row): getCloudbrainCardType(rs), getCellName("J", row): rs.Name, getCellName("K", row): getRepoPathName(rs), getCellName("L", row): rs.JobName, } } diff --git a/routers/admin/resources.go b/routers/admin/resources.go index 20638553b..feea7b69b 100644 --- a/routers/admin/resources.go +++ b/routers/admin/resources.go @@ -127,6 +127,7 @@ func GetResourceSpecificationList(ctx *context.Context) { Status: status, Cluster: cluster, AvailableCode: available, + OrderBy: models.SearchSpecOrderById, }) if err != nil { log.Error("GetResourceSpecificationList error.%v", err) @@ -136,6 +137,26 @@ func GetResourceSpecificationList(ctx *context.Context) { ctx.JSON(http.StatusOK, response.SuccessWithData(list)) } +func GetAllResourceSpecificationList(ctx *context.Context) { + queue := ctx.QueryInt64("queue") + 
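+	// NOTE: unlike GetResourceSpecificationList above, this endpoint returns the
+	// de-duplicated specification list (GetAllDistinctResourceSpecification) and
+	// applies no explicit ordering.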
status := ctx.QueryInt("status")
+	cluster := ctx.Query("cluster")
+	available := ctx.QueryInt("available")
+	list, err := resource.GetAllDistinctResourceSpecification(models.SearchResourceSpecificationOptions{
+		QueueId:       queue,
+		Status:        status,
+		Cluster:       cluster,
+		AvailableCode: available,
+	})
+	if err != nil {
+		log.Error("GetAllResourceSpecificationList error.%v", err)
+		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
+		return
+	}
+
+	ctx.JSON(http.StatusOK, response.SuccessWithData(list))
+}
+
 func GetResourceSpecificationScenes(ctx *context.Context) {
 	specId := ctx.ParamsInt64(":id")
 	list, err := resource.GetResourceSpecificationScenes(specId)
diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go
index 96de00c55..14badfdb4 100755
--- a/routers/api/v1/api.go
+++ b/routers/api/v1/api.go
@@ -631,6 +631,7 @@ func RegisterRoutes(m *macaron.Macaron) {
 			m.Get("/overview_resource", repo.GetCloudbrainResourceOverview)
 			m.Get("/resource_usage_statistic", repo.GetDurationRateStatistic)
 			m.Get("/resource_usage_rate_detail", repo.GetCloudbrainResourceUsageDetail)
+			m.Get("/resource_queues", repo.GetResourceQueues)
 			m.Get("/apitest_for_statistic", repo.CloudbrainDurationStatisticForTest)
 		})
 	}, operationReq)
@@ -1017,7 +1018,9 @@ func RegisterRoutes(m *macaron.Macaron) {
 			m.Get("/query_modelfile_for_predict", repo.QueryModelFileForPredict)
 			m.Get("/query_train_model", repo.QueryTrainModelList)
 			m.Post("/create_model_convert", repo.CreateModelConvert)
-			m.Get("/show_model_convert_page")
+			m.Get("/show_model_convert_page", repo.ShowModelConvertPage)
+			m.Get("/query_model_convert_byId", repo.QueryModelConvertById)
+
 			m.Get("/:id", repo.GetCloudbrainModelConvertTask)
 			m.Get("/:id/log", repo.CloudbrainForModelConvertGetLog)
 			m.Get("/:id/modelartlog", repo.TrainJobForModelConvertGetLog)
@@ -1054,6 +1057,7 @@ func RegisterRoutes(m *macaron.Macaron) {
 			m.Get("", repo.GetModelArtsTrainJobVersion)
 			m.Post("/stop_version", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo_ext.GrampusStopJob)
 			m.Get("/log", repo_ext.GrampusGetLog)
+			m.Get("/metrics", repo_ext.GrampusMetrics)
 			m.Get("/download_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo_ext.GrampusDownloadLog)
 		})
 	})
diff --git a/routers/api/v1/repo/cloudbrain.go b/routers/api/v1/repo/cloudbrain.go
index 3b6ce54aa..cecb0ec27 100755
--- a/routers/api/v1/repo/cloudbrain.go
+++ b/routers/api/v1/repo/cloudbrain.go
@@ -666,12 +666,24 @@ func CloudbrainGetLog(ctx *context.APIContext) {
 			existStr = taskRes.TaskStatuses[0].ExitDiagnostics
 		}
 		ctx.Data["existStr"] = existStr
-		log.Info("existStr=" + existStr)
 	} else {
 		ModelSafetyGetLog(ctx)
 		return
 	}
+	}
+	if job.JobType == string(models.JobTypeTrain) || job.JobType == string(models.JobTypeInference) {
+		if job.Type == models.TypeCloudBrainOne {
+			result, err := cloudbrain.GetJob(job.JobID)
+			existStr := ""
+			if err == nil && result != nil {
+				jobRes, _ := models.ConvertToJobResultPayload(result.Payload)
+				taskRoles := jobRes.TaskRoles
+				taskRes, _ := models.ConvertToTaskPod(taskRoles[cloudbrain.SubTaskName].(map[string]interface{}))
+				if len(taskRes.TaskStatuses) > 0 {
+					existStr = taskRes.TaskStatuses[0].ExitDiagnostics
+				}
+			}
+			ctx.Data["existStr"] = existStr
+		}
 	}

 	lines := ctx.QueryInt("lines")
@@ -716,17 +728,25 @@ func CloudbrainGetLog(ctx *context.APIContext) {
 	if result["Content"] != nil {
 		content = result["Content"].(string)
 	}
+
 	if ctx.Data["existStr"] != nil && result["Lines"].(int) < 50 {
 		content = content + ctx.Data["existStr"].(string)
 	}
+
+	logFileName := result["FileName"]
+
+	//Logs can only be downloaded if the file exists
+	//and 
the current user is an administrator or the creator of the task + canLogDownload := logFileName != nil && logFileName != "" && job.IsUserHasRight(ctx.User) + re := map[string]interface{}{ "JobID": ID, - "LogFileName": result["FileName"], + "LogFileName": logFileName, "StartLine": result["StartLine"], "EndLine": result["EndLine"], "Content": content, "Lines": result["Lines"], - "CanLogDownload": result["FileName"] != "", + "CanLogDownload": canLogDownload, "StartTime": job.StartTime, } //result := CloudbrainGetLogByJobId(job.JobID, job.JobName) diff --git a/routers/api/v1/repo/cloudbrain_dashboard.go b/routers/api/v1/repo/cloudbrain_dashboard.go index d1ccf1bf5..446522fc2 100755 --- a/routers/api/v1/repo/cloudbrain_dashboard.go +++ b/routers/api/v1/repo/cloudbrain_dashboard.go @@ -12,6 +12,8 @@ import ( "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/routers/repo" + cloudbrainService "code.gitea.io/gitea/services/cloudbrain" + "code.gitea.io/gitea/services/cloudbrain/resource" "github.com/360EntSecGroup-Skylar/excelize/v2" ) @@ -121,8 +123,8 @@ func GetOverviewDuration(ctx *context.Context) { recordBeginTime := recordCloudbrain[0].Cloudbrain.CreatedUnix now := time.Now() endTime := now - worker_server_num := 1 - cardNum := 1 + // worker_server_num := 1 + // cardNum := 1 durationAllSum := int64(0) cardDuSum := int64(0) @@ -148,34 +150,40 @@ func GetOverviewDuration(ctx *context.Context) { models.LoadSpecs4CloudbrainInfo(cloudbrains) for _, cloudbrain := range cloudbrains { - if cloudbrain.Cloudbrain.WorkServerNumber >= 1 { - worker_server_num = cloudbrain.Cloudbrain.WorkServerNumber - } else { - worker_server_num = 1 - } - if cloudbrain.Cloudbrain.Spec == nil { - cardNum = 1 - } else { - cardNum = cloudbrain.Cloudbrain.Spec.AccCardsNum - } - duration := cloudbrain.Duration - durationSum := cloudbrain.Duration * int64(worker_server_num) * int64(cardNum) + cloudbrain = cloudbrainService.UpdateCloudbrainAiCenter(cloudbrain) + CardDurationString := repo.GetCloudbrainCardDuration(cloudbrain.Cloudbrain) + CardDuration := models.ConvertStrToDuration(CardDurationString) + // if cloudbrain.Cloudbrain.WorkServerNumber >= 1 { + // worker_server_num = cloudbrain.Cloudbrain.WorkServerNumber + // } else { + // worker_server_num = 1 + // } + // if cloudbrain.Cloudbrain.Spec == nil { + // cardNum = 1 + // } else { + // cardNum = cloudbrain.Cloudbrain.Spec.AccCardsNum + // } + // duration := cloudbrain.Duration + // duration := cloudbrain.Duration + duration := models.ConvertStrToDuration(cloudbrain.TrainJobDuration) + // CardDuration := cloudbrain.Duration * int64(worker_server_num) * int64(cardNum) + if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainOne { cloudBrainOneDuration += duration - cloudBrainOneCardDuSum += durationSum + cloudBrainOneCardDuSum += CardDuration } else if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainTwo { cloudBrainTwoDuration += duration - cloudBrainTwoCardDuSum += durationSum + cloudBrainTwoCardDuSum += CardDuration } else if cloudbrain.Cloudbrain.Type == models.TypeC2Net { c2NetDuration += duration - c2NetCardDuSum += durationSum + c2NetCardDuSum += CardDuration } else if cloudbrain.Cloudbrain.Type == models.TypeCDCenter { cDCenterDuration += duration - cDNetCardDuSum += durationSum + cDNetCardDuSum += CardDuration } durationAllSum += duration - cardDuSum += durationSum + cardDuSum += CardDuration } ctx.JSON(http.StatusOK, map[string]interface{}{ "cloudBrainOneCardDuSum": cloudBrainOneCardDuSum, @@ -192,6 +200,28 @@ func 
GetOverviewDuration(ctx *context.Context) { }) } +func GetCloudbrainCardDuration(task models.Cloudbrain) string { + cardNum := int(0) + spec, err := resource.GetCloudbrainSpec(task.ID) + if err != nil { + log.Info("error:" + err.Error()) + return "" + } + if spec != nil { + cardNum = spec.AccCardsNum + } else { + cardNum = 1 + } + var workServerNumber int64 + if task.WorkServerNumber >= 1 { + workServerNumber = int64(task.WorkServerNumber) + } else { + workServerNumber = 1 + } + cardDuration := models.ConvertDurationToStr(workServerNumber * int64(cardNum) * task.Duration) + return cardDuration +} + func GetAllCloudbrainsTrend(ctx *context.Context) { queryType := ctx.QueryTrim("type") @@ -703,6 +733,30 @@ func GetCloudbrainsDetailData(ctx *context.Context) { aiCenter := ctx.Query("aiCenter") needDeleteInfo := ctx.Query("needDeleteInfo") + if cloudBrainType == models.TypeCloudBrainOne && aiCenter == models.AICenterOfCloudBrainOne { + aiCenter = "" + } + if cloudBrainType == models.TypeCloudBrainTwo && aiCenter == models.AICenterOfCloudBrainTwo { + aiCenter = "" + } + if cloudBrainType == models.TypeCDCenter && aiCenter == models.AICenterOfChengdu { + aiCenter = "" + } + if cloudBrainType == models.TypeCloudBrainAll { + if aiCenter == models.AICenterOfCloudBrainOne { + cloudBrainType = models.TypeCloudBrainOne + aiCenter = "" + } + if aiCenter == models.AICenterOfCloudBrainTwo { + cloudBrainType = models.TypeCloudBrainTwo + aiCenter = "" + } + if aiCenter == models.AICenterOfChengdu { + cloudBrainType = models.TypeCDCenter + aiCenter = "" + } + } + page := ctx.QueryInt("page") pageSize := ctx.QueryInt("pagesize") if page <= 0 { @@ -732,7 +786,7 @@ func GetCloudbrainsDetailData(ctx *context.Context) { keyword := strings.Trim(ctx.Query("q"), " ") - ciTasks, _, err := models.CloudbrainAll(&models.CloudbrainsOptions{ + ciTasks, count, err := models.CloudbrainAll(&models.CloudbrainsOptions{ ListOptions: models.ListOptions{ Page: page, PageSize: pageSize, @@ -747,8 +801,8 @@ func GetCloudbrainsDetailData(ctx *context.Context) { NeedRepoInfo: true, BeginTimeUnix: int64(recordBeginTime), EndTimeUnix: endTime.Unix(), - // AiCenter: aiCenter, - NeedDeleteInfo: needDeleteInfo, + AiCenter: aiCenter, + NeedDeleteInfo: needDeleteInfo, }) if err != nil { ctx.ServerError("Get job failed:", err) @@ -758,45 +812,43 @@ func GetCloudbrainsDetailData(ctx *context.Context) { nilTime := time.Time{} tasks := []models.TaskDetail{} for i, task := range ciTasks { - if aiCenter == "" || aiCenter == task.Cloudbrain.Spec.AiCenterCode { - ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource - var taskDetail models.TaskDetail - taskDetail.ID = ciTasks[i].Cloudbrain.ID - taskDetail.JobID = ciTasks[i].Cloudbrain.JobID - taskDetail.JobName = ciTasks[i].JobName - taskDetail.DisplayJobName = ciTasks[i].DisplayJobName - taskDetail.Status = ciTasks[i].Status - taskDetail.JobType = ciTasks[i].JobType - taskDetail.CreatedUnix = ciTasks[i].Cloudbrain.CreatedUnix - taskDetail.RunTime = ciTasks[i].Cloudbrain.TrainJobDuration - taskDetail.StartTime = ciTasks[i].StartTime - taskDetail.EndTime = ciTasks[i].EndTime - taskDetail.ComputeResource = ciTasks[i].ComputeResource - taskDetail.Type = ciTasks[i].Cloudbrain.Type - taskDetail.UserName = ciTasks[i].User.Name - taskDetail.RepoID = ciTasks[i].RepoID - if ciTasks[i].Repo != nil { - taskDetail.RepoName = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Name - taskDetail.RepoAlias = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Alias - } - if 
ciTasks[i].Cloudbrain.WorkServerNumber >= 1 { - taskDetail.WorkServerNum = int64(ciTasks[i].Cloudbrain.WorkServerNumber) - } else { - taskDetail.WorkServerNum = 1 - } - taskDetail.CardDuration = repo.GetCloudbrainCardDuration(ciTasks[i].Cloudbrain) - taskDetail.WaitTime = repo.GetCloudbrainWaitTime(ciTasks[i].Cloudbrain) + task = cloudbrainService.UpdateCloudbrainAiCenter(task) + var taskDetail models.TaskDetail + taskDetail.ID = ciTasks[i].Cloudbrain.ID + taskDetail.JobID = ciTasks[i].Cloudbrain.JobID + taskDetail.JobName = ciTasks[i].JobName + taskDetail.DisplayJobName = ciTasks[i].DisplayJobName + taskDetail.Status = ciTasks[i].Status + taskDetail.JobType = ciTasks[i].JobType + taskDetail.CreatedUnix = ciTasks[i].Cloudbrain.CreatedUnix + taskDetail.RunTime = ciTasks[i].Cloudbrain.TrainJobDuration + taskDetail.StartTime = ciTasks[i].StartTime + taskDetail.EndTime = ciTasks[i].EndTime + taskDetail.ComputeResource = ciTasks[i].ComputeResource + taskDetail.Type = ciTasks[i].Cloudbrain.Type + taskDetail.UserName = ciTasks[i].User.Name + taskDetail.RepoID = ciTasks[i].RepoID + taskDetail.AiCenter = repo.GetAiCenterNameByCode(task.Cloudbrain.AiCenter, ctx.Language()) + if ciTasks[i].Repo != nil { + taskDetail.RepoName = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Name + taskDetail.RepoAlias = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Alias + } + if ciTasks[i].Cloudbrain.WorkServerNumber >= 1 { + taskDetail.WorkServerNum = int64(ciTasks[i].Cloudbrain.WorkServerNumber) + } else { + taskDetail.WorkServerNum = 1 + } + taskDetail.CardDuration = repo.GetCloudbrainCardDuration(ciTasks[i].Cloudbrain) + taskDetail.WaitTime = repo.GetCloudbrainWaitTime(ciTasks[i].Cloudbrain) - if ciTasks[i].Cloudbrain.DeletedAt != nilTime || ciTasks[i].Repo == nil { - taskDetail.IsDelete = true - } else { - taskDetail.IsDelete = false - } - taskDetail.Spec = ciTasks[i].Spec - tasks = append(tasks, taskDetail) + if ciTasks[i].Cloudbrain.DeletedAt != nilTime || ciTasks[i].Repo == nil { + taskDetail.IsDelete = true + } else { + taskDetail.IsDelete = false } + taskDetail.Spec = ciTasks[i].Spec + tasks = append(tasks, taskDetail) } - count := int64(len(tasks)) pager := context.NewPagination(int(count), pageSize, page, getTotalPage(count, pageSize)) pager.SetDefaultParams(ctx) pager.AddParam(ctx, "listType", "ListType") @@ -1176,6 +1228,12 @@ func getMonthCloudbrainInfo(beginTime time.Time, endTime time.Time) ([]DateCloud } func DownloadCloudBrainBoard(ctx *context.Context) { + recordCloudbrain, err := models.GetRecordBeginTime() + if err != nil { + log.Error("Can not get recordCloudbrain", err) + ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err")) + return + } page := 1 @@ -1184,14 +1242,20 @@ func DownloadCloudBrainBoard(ctx *context.Context) { var cloudBrain = ctx.Tr("repo.cloudbrain") fileName := getCloudbrainFileName(cloudBrain) + recordBeginTime := recordCloudbrain[0].Cloudbrain.CreatedUnix + now := time.Now() + endTime := now + _, total, err := models.CloudbrainAll(&models.CloudbrainsOptions{ ListOptions: models.ListOptions{ Page: page, PageSize: pageSize, }, - Type: models.TypeCloudBrainAll, - NeedRepoInfo: false, + Type: models.TypeCloudBrainAll, + BeginTimeUnix: int64(recordBeginTime), + EndTimeUnix: endTime.Unix(), }) + log.Info("totalcountisis:", total) if err != nil { log.Warn("Can not get cloud brain info", err) @@ -1216,8 +1280,10 @@ func DownloadCloudBrainBoard(ctx *context.Context) { Page: page, PageSize: pageSize, }, - Type: models.TypeCloudBrainAll, - NeedRepoInfo: true, 
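+		// keep this paged export query on the same [recordBeginTime, now] window as
+		// the count query above, so the exported rows always match the reported total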
+ Type: models.TypeCloudBrainAll, + BeginTimeUnix: int64(recordBeginTime), + EndTimeUnix: endTime.Unix(), + NeedRepoInfo: true, }) if err != nil { log.Warn("Can not get cloud brain info", err) @@ -1225,7 +1291,8 @@ func DownloadCloudBrainBoard(ctx *context.Context) { } models.LoadSpecs4CloudbrainInfo(pageRecords) for _, record := range pageRecords { - + record = cloudbrainService.UpdateCloudbrainAiCenter(record) + record.Cloudbrain.AiCenter = repo.GetAiCenterNameByCode(record.Cloudbrain.AiCenter, ctx.Language()) for k, v := range allCloudbrainValues(row, record, ctx) { f.SetCellValue(cloudBrain, k, v) } @@ -1264,7 +1331,7 @@ func allCloudbrainValues(row int, rs *models.CloudbrainInfo, ctx *context.Contex getCellName("G", row): rs.TrainJobDuration, getCellName("H", row): repo.GetCloudbrainCardDuration(rs.Cloudbrain), getCellName("I", row): getBrainStartTime(rs), getCellName("J", row): getBrainEndTime(rs), getCellName("K", row): rs.ComputeResource, getCellName("L", row): getCloudbrainCardType(rs), - getCellName("M", row): getWorkServerNum(rs), getCellName("N", row): repo.GetCloudbrainAiCenter(rs.Cloudbrain, ctx), + getCellName("M", row): getWorkServerNum(rs), getCellName("N", row): rs.Cloudbrain.AiCenter, getCellName("O", row): getCloudbrainFlavorName(rs), getCellName("P", row): rs.Name, getCellName("Q", row): getBrainRepo(rs), getCellName("R", row): rs.JobName, getCellName("S", row): getBrainDeleteTime(rs), } @@ -1417,7 +1484,7 @@ func GetCloudbrainResourceOverview(ctx *context.Context) { log.Error("Can not get GetDurationRecordBeginTime", err) return } - recordBeginTime := recordCloudbrainDuration[0].CreatedUnix + recordBeginTime := recordCloudbrainDuration[0].DateTime recordUpdateTime := time.Now().Unix() resourceQueues, err := models.GetCanUseCardInfo() if err != nil { @@ -1428,11 +1495,12 @@ func GetCloudbrainResourceOverview(ctx *context.Context) { C2NetResourceDetail := []models.ResourceDetail{} for _, resourceQueue := range resourceQueues { if resourceQueue.Cluster == models.OpenICluster { + aiCenterName := repo.GetAiCenterNameByCode(resourceQueue.AiCenterCode, ctx.Language()) var resourceDetail models.ResourceDetail resourceDetail.QueueCode = resourceQueue.QueueCode resourceDetail.Cluster = resourceQueue.Cluster resourceDetail.AiCenterCode = resourceQueue.AiCenterCode - resourceDetail.AiCenterName = resourceQueue.AiCenterName + "/" + resourceQueue.AiCenterCode + resourceDetail.AiCenterName = resourceQueue.AiCenterCode + "/" + aiCenterName resourceDetail.ComputeResource = resourceQueue.ComputeResource resourceDetail.AccCardType = resourceQueue.AccCardType + "(" + resourceQueue.ComputeResource + ")" resourceDetail.CardsTotalNum = resourceQueue.CardsTotalNum @@ -1440,11 +1508,12 @@ func GetCloudbrainResourceOverview(ctx *context.Context) { OpenIResourceDetail = append(OpenIResourceDetail, resourceDetail) } if resourceQueue.Cluster == models.C2NetCluster { + aiCenterName := repo.GetAiCenterNameByCode(resourceQueue.AiCenterCode, ctx.Language()) var resourceDetail models.ResourceDetail resourceDetail.QueueCode = resourceQueue.QueueCode resourceDetail.Cluster = resourceQueue.Cluster resourceDetail.AiCenterCode = resourceQueue.AiCenterCode - resourceDetail.AiCenterName = resourceQueue.AiCenterName + "/" + resourceQueue.AiCenterCode + resourceDetail.AiCenterName = resourceQueue.AiCenterCode + "/" + aiCenterName resourceDetail.ComputeResource = resourceQueue.ComputeResource resourceDetail.AccCardType = resourceQueue.AccCardType + "(" + resourceQueue.ComputeResource + ")" 
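+			// NOTE: the AiCenterName built above is now "<aiCenterCode>/<localized name>",
+			// resolved via GetAiCenterNameByCode rather than the name stored on the queue.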
resourceDetail.CardsTotalNum = resourceQueue.CardsTotalNum @@ -1554,7 +1623,7 @@ func getBeginAndEndTime(ctx *context.Context) (time.Time, time.Time) { ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err")) return beginTime, endTime } - brainRecordBeginTime := recordCloudbrainDuration[0].CreatedUnix.AsTime() + brainRecordBeginTime := recordCloudbrainDuration[0].DateTime.AsTime() beginTime = brainRecordBeginTime endTime = now } else if queryType == "today" { @@ -1596,7 +1665,7 @@ func getBeginAndEndTime(ctx *context.Context) (time.Time, time.Time) { ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err")) return beginTime, endTime } - brainRecordBeginTime := recordCloudbrainDuration[0].CreatedUnix.AsTime() + brainRecordBeginTime := recordCloudbrainDuration[0].DateTime.AsTime() beginTime = brainRecordBeginTime endTime = now } else { @@ -1627,7 +1696,7 @@ func getAiCenterUsageDuration(beginTime time.Time, endTime time.Time, cloudbrain usageRate := float64(0) for _, cloudbrainStatistic := range cloudbrainStatistics { - if int64(cloudbrainStatistic.CreatedUnix) >= beginTime.Unix() && int64(cloudbrainStatistic.CreatedUnix) < endTime.Unix() { + if int64(cloudbrainStatistic.DateTime) >= beginTime.Unix() && int64(cloudbrainStatistic.DateTime) < endTime.Unix() { totalDuration += cloudbrainStatistic.CardsTotalDuration usageDuration += cloudbrainStatistic.CardsUseDuration } @@ -1659,28 +1728,29 @@ func getDurationStatistic(beginTime time.Time, endTime time.Time) (models.Durati return OpenIDurationRate, C2NetDurationRate, 0 } for _, cloudbrainStatistic := range cardDurationStatistics { + aiCenterName := cloudbrainStatistic.AiCenterCode + "/" + repo.GetAiCenterNameByCode(cloudbrainStatistic.AiCenterCode, "zh-CN") if cloudbrainStatistic.Cluster == models.OpenICluster { - if _, ok := OpenITotalDuration[cloudbrainStatistic.AiCenterName]; !ok { - OpenITotalDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsTotalDuration + if _, ok := OpenITotalDuration[aiCenterName]; !ok { + OpenITotalDuration[aiCenterName] = cloudbrainStatistic.CardsTotalDuration } else { - OpenITotalDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsTotalDuration + OpenITotalDuration[aiCenterName] += cloudbrainStatistic.CardsTotalDuration } - if _, ok := OpenIUsageDuration[cloudbrainStatistic.AiCenterName]; !ok { - OpenIUsageDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsUseDuration + if _, ok := OpenIUsageDuration[aiCenterName]; !ok { + OpenIUsageDuration[aiCenterName] = cloudbrainStatistic.CardsUseDuration } else { - OpenIUsageDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsUseDuration + OpenIUsageDuration[aiCenterName] += cloudbrainStatistic.CardsUseDuration } } if cloudbrainStatistic.Cluster == models.C2NetCluster { - if _, ok := C2NetTotalDuration[cloudbrainStatistic.AiCenterName]; !ok { - C2NetTotalDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsTotalDuration + if _, ok := C2NetTotalDuration[aiCenterName]; !ok { + C2NetTotalDuration[aiCenterName] = cloudbrainStatistic.CardsTotalDuration } else { - C2NetTotalDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsTotalDuration + C2NetTotalDuration[aiCenterName] += cloudbrainStatistic.CardsTotalDuration } - if _, ok := C2NetUsageDuration[cloudbrainStatistic.AiCenterName]; !ok { - C2NetUsageDuration[cloudbrainStatistic.AiCenterName] = cloudbrainStatistic.CardsUseDuration + if _, ok := C2NetUsageDuration[aiCenterName]; !ok { + 
C2NetUsageDuration[aiCenterName] = cloudbrainStatistic.CardsUseDuration } else { - C2NetUsageDuration[cloudbrainStatistic.AiCenterName] += cloudbrainStatistic.CardsUseDuration + C2NetUsageDuration[aiCenterName] += cloudbrainStatistic.CardsUseDuration } } } @@ -1690,16 +1760,17 @@ func getDurationStatistic(beginTime time.Time, endTime time.Time) (models.Durati return OpenIDurationRate, C2NetDurationRate, 0 } for _, v := range ResourceAiCenterRes { + aiCenterName := v.AiCenterCode + "/" + repo.GetAiCenterNameByCode(v.AiCenterCode, "zh-CN") if cutString(v.AiCenterCode, 4) == cutString(models.AICenterOfCloudBrainOne, 4) { - if _, ok := OpenIUsageDuration[v.AiCenterName]; !ok { - OpenIUsageDuration[v.AiCenterName] = 0 + if _, ok := OpenIUsageDuration[aiCenterName]; !ok { + OpenIUsageDuration[aiCenterName] = 0 } - if _, ok := OpenITotalDuration[v.AiCenterName]; !ok { - OpenITotalDuration[v.AiCenterName] = 0 + if _, ok := OpenITotalDuration[aiCenterName]; !ok { + OpenITotalDuration[aiCenterName] = 0 } } else { - if _, ok := C2NetUsageDuration[v.AiCenterName]; !ok { - C2NetUsageDuration[v.AiCenterName] = 0 + if _, ok := C2NetUsageDuration[aiCenterName]; !ok { + C2NetUsageDuration[aiCenterName] = 0 } } } @@ -1716,7 +1787,7 @@ func getDurationStatistic(beginTime time.Time, endTime time.Time) (models.Durati for _, v := range OpenITotalDuration { totalCanUse += float64(v) } - for _, v := range OpenIUsageRate { + for _, v := range OpenIUsageDuration { totalUse += float64(v) } if totalCanUse == 0 || totalUse == 0 { @@ -1724,6 +1795,7 @@ func getDurationStatistic(beginTime time.Time, endTime time.Time) (models.Durati } else { totalUsageRate = totalUse / totalCanUse } + delete(C2NetUsageDuration, "/") OpenIDurationRate.AiCenterTotalDurationStat = OpenITotalDuration OpenIDurationRate.AiCenterUsageDurationStat = OpenIUsageDuration @@ -1831,3 +1903,30 @@ func getHourCloudbrainDuration(beginTime time.Time, endTime time.Time, aiCenterC hourTimeStatistic.HourTimeUsageRate = hourTimeUsageRate return hourTimeStatistic, nil } + +func CloudbrainUpdateAiCenter(ctx *context.Context) { + repo.CloudbrainDurationStatisticHour() + ctx.JSON(http.StatusOK, map[string]interface{}{ + "message": 0, + }) +} + +func GetResourceQueues(ctx *context.Context) { + resourceQueues, err := models.GetCanUseCardInfo() + if err != nil { + log.Info("GetCanUseCardInfo err: %v", err) + return + } + Resource := make([]*models.ResourceQueue, 0) + aiCenterCodeMap := make(map[string]string) + for _, resourceQueue := range resourceQueues { + if _, ok := aiCenterCodeMap[resourceQueue.AiCenterCode]; !ok { + resourceQueue.AiCenterName = repo.GetAiCenterNameByCode(resourceQueue.AiCenterCode, ctx.Language()) + aiCenterCodeMap[resourceQueue.AiCenterCode] = resourceQueue.AiCenterCode + Resource = append(Resource, resourceQueue) + } + } + ctx.JSON(http.StatusOK, map[string]interface{}{ + "resourceQueues": Resource, + }) +} diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go index 008567179..127ddd835 100755 --- a/routers/api/v1/repo/modelarts.go +++ b/routers/api/v1/repo/modelarts.go @@ -150,7 +150,6 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) { if len(result.JobInfo.Tasks) > 0 { if len(result.JobInfo.Tasks[0].CenterID) > 0 && len(result.JobInfo.Tasks[0].CenterName) > 0 { job.AiCenter = result.JobInfo.Tasks[0].CenterID[0] + "+" + result.JobInfo.Tasks[0].CenterName[0] - // aiCenterName = result.JobInfo.Tasks[0].CenterName[0] aiCenterName = cloudbrainService.GetAiCenterShow(job.AiCenter, ctx.Context) } } @@ 
-285,15 +284,6 @@ func TrainJobGetLog(ctx *context.APIContext) { return } - prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, modelarts.LogPath, versionName), "/") + "/job" - _, err = storage.GetObsLogFileName(prefix) - var canLogDownload bool - if err != nil { - canLogDownload = false - } else { - canLogDownload = true - } - ctx.Data["log_file_name"] = resultLogFile.LogFileList[0] ctx.JSON(http.StatusOK, map[string]interface{}{ @@ -303,11 +293,23 @@ func TrainJobGetLog(ctx *context.APIContext) { "EndLine": result.EndLine, "Content": result.Content, "Lines": result.Lines, - "CanLogDownload": canLogDownload, + "CanLogDownload": canLogDownload(ctx.User, task), "StartTime": task.StartTime, }) } +func canLogDownload(user *models.User, task *models.Cloudbrain) bool { + if task == nil || !task.IsUserHasRight(user) { + return false + } + prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, modelarts.LogPath, task.VersionName), "/") + "/job" + _, err := storage.GetObsLogFileName(prefix) + if err != nil { + return false + } + return true +} + func trainJobGetLogContent(jobID string, versionID int64, baseLine string, order string, lines int) (*models.GetTrainJobLogFileNamesResult, *models.GetTrainJobLogResult, error) { resultLogFile, err := modelarts.GetTrainJobLogFileNames(jobID, strconv.FormatInt(versionID, 10)) diff --git a/routers/api/v1/repo/modelmanage.go b/routers/api/v1/repo/modelmanage.go index 2c1fd9f01..15260790d 100644 --- a/routers/api/v1/repo/modelmanage.go +++ b/routers/api/v1/repo/modelmanage.go @@ -104,3 +104,12 @@ func ShowModelConvertPage(ctx *context.APIContext) { } } + +func QueryModelConvertById(ctx *context.APIContext) { + modelResult, err := routerRepo.GetModelConvertById(ctx.Context) + if err == nil { + ctx.JSON(http.StatusOK, modelResult) + } else { + ctx.JSON(http.StatusOK, nil) + } +} diff --git a/routers/private/internal.go b/routers/private/internal.go index 3e2eeab31..14b0f05de 100755 --- a/routers/private/internal.go +++ b/routers/private/internal.go @@ -6,9 +6,10 @@ package private import ( - "code.gitea.io/gitea/routers/admin" "strings" + "code.gitea.io/gitea/routers/admin" + "code.gitea.io/gitea/routers/repo" "code.gitea.io/gitea/modules/log" @@ -52,7 +53,9 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/tool/org_stat", OrgStatisticManually) m.Post("/tool/update_repo_visit/:date", UpdateRepoVisit) m.Post("/task/history_handle/duration", repo.HandleTaskWithNoDuration) + m.Post("/task/history_handle/aicenter", repo.HandleTaskWithAiCenter) m.Post("/resources/specification/handle_historical_task", admin.RefreshHistorySpec) + m.Post("/duration_statisctic/history_handle", repo.CloudbrainUpdateHistoryData) }, CheckInternalToken) } diff --git a/routers/repo/ai_model_convert.go b/routers/repo/ai_model_convert.go index 560ace8fd..962c76aae 100644 --- a/routers/repo/ai_model_convert.go +++ b/routers/repo/ai_model_convert.go @@ -150,6 +150,7 @@ func SaveModelConvert(ctx *context.Context) { go goCreateTask(modelConvert, ctx, task) ctx.JSON(200, map[string]string{ + "id": id, "code": "0", }) } @@ -726,6 +727,11 @@ func ShowModelConvertPageInfo(ctx *context.Context) { } } +func GetModelConvertById(ctx *context.Context) (*models.AiModelConvert, error) { + id := ctx.Query("id") + return models.QueryModelConvertById(id) +} + func GetModelConvertPageData(ctx *context.Context) ([]*models.AiModelConvert, int64, error) { page := ctx.QueryInt("page") if page <= 0 { diff --git a/routers/repo/ai_model_manage.go 
b/routers/repo/ai_model_manage.go index 1bef11703..7eedb9bc4 100644 --- a/routers/repo/ai_model_manage.go +++ b/routers/repo/ai_model_manage.go @@ -22,16 +22,24 @@ import ( ) const ( - Model_prefix = "aimodels/" - tplModelManageIndex = "repo/modelmanage/index" - tplModelManageDownload = "repo/modelmanage/download" - tplModelInfo = "repo/modelmanage/showinfo" - MODEL_LATEST = 1 - MODEL_NOT_LATEST = 0 - MODEL_MAX_SIZE = 1024 * 1024 * 1024 - STATUS_COPY_MODEL = 1 - STATUS_FINISHED = 0 - STATUS_ERROR = 2 + Attachment_model = "model" + Model_prefix = "aimodels/" + tplModelManageIndex = "repo/modelmanage/index" + tplModelManageDownload = "repo/modelmanage/download" + tplModelInfo = "repo/modelmanage/showinfo" + tplCreateLocalModelInfo = "repo/modelmanage/create_local_1" + tplCreateLocalForUploadModelInfo = "repo/modelmanage/create_local_2" + tplCreateOnlineModelInfo = "repo/modelmanage/create_online" + + MODEL_LATEST = 1 + MODEL_NOT_LATEST = 0 + MODEL_MAX_SIZE = 1024 * 1024 * 1024 + STATUS_COPY_MODEL = 1 + STATUS_FINISHED = 0 + STATUS_ERROR = 2 + + MODEL_LOCAL_TYPE = 1 + MODEL_ONLINE_TYPE = 0 ) func saveModelByParameters(jobId string, versionName string, name string, version string, label string, description string, engine int, ctx *context.Context) (string, error) { @@ -70,13 +78,12 @@ func saveModelByParameters(jobId string, versionName string, name string, versio cloudType = models.TypeCloudBrainTwo } else if aiTask.ComputeResource == models.GPUResource { cloudType = models.TypeCloudBrainOne - spec, err := resource.GetCloudbrainSpec(aiTask.ID) - if err == nil { - flaverName := "GPU: " + fmt.Sprint(spec.AccCardsNum) + "*" + spec.AccCardType + ",CPU: " + fmt.Sprint(spec.CpuCores) + "," + ctx.Tr("cloudbrain.memory") + ": " + fmt.Sprint(spec.MemGiB) + "GB," + ctx.Tr("cloudbrain.shared_memory") + ": " + fmt.Sprint(spec.ShareMemGiB) + "GB" - aiTask.FlavorName = flaverName - } } - + spec, err := resource.GetCloudbrainSpec(aiTask.ID) + if err == nil { + specJson, _ := json.Marshal(spec) + aiTask.FlavorName = string(specJson) + } accuracy := make(map[string]string) accuracy["F1"] = "" accuracy["Recall"] = "" @@ -189,6 +196,139 @@ func SaveNewNameModel(ctx *context.Context) { log.Info("save model end.") } +func SaveLocalModel(ctx *context.Context) { + if !ctx.Repo.CanWrite(models.UnitTypeModelManage) { + ctx.Error(403, ctx.Tr("repo.model_noright")) + return + } + re := map[string]string{ + "code": "-1", + } + log.Info("save SaveLocalModel start.") + uuid := uuid.NewV4() + id := uuid.String() + name := ctx.Query("name") + version := ctx.Query("version") + if version == "" { + version = "0.0.1" + } + label := ctx.Query("label") + description := ctx.Query("description") + engine := ctx.QueryInt("engine") + taskType := ctx.QueryInt("type") + modelActualPath := "" + if taskType == models.TypeCloudBrainOne { + destKeyNamePrefix := Model_prefix + models.AttachmentRelativePath(id) + "/" + modelActualPath = setting.Attachment.Minio.Bucket + "/" + destKeyNamePrefix + } else if taskType == models.TypeCloudBrainTwo { + destKeyNamePrefix := Model_prefix + models.AttachmentRelativePath(id) + "/" + modelActualPath = setting.Bucket + "/" + destKeyNamePrefix + } else { + re["msg"] = "type is error." 
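+		// NOTE: failures are reported with HTTP 200 and code="-1"/msg in the JSON
+		// body rather than an HTTP error status, matching the other model-manage
+		// handlers in this file.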
+		ctx.JSON(200, re)
+		return
+	}
+	var lastNewModelId string
+	repoId := ctx.Repo.Repository.ID
+	aimodels := models.QueryModelByName(name, repoId)
+	if len(aimodels) > 0 {
+		for _, model := range aimodels {
+			if model.Version == version {
+				re["msg"] = ctx.Tr("repo.model.manage.create_error")
+				ctx.JSON(200, re)
+				return
+			}
+			if model.New == MODEL_LATEST {
+				lastNewModelId = model.ID
+			}
+		}
+	}
+	model := &models.AiModelManage{
+		ID:            id,
+		Version:       version,
+		ModelType:     MODEL_LOCAL_TYPE,
+		VersionCount:  len(aimodels) + 1,
+		Label:         label,
+		Name:          name,
+		Description:   description,
+		New:           MODEL_LATEST,
+		Type:          taskType,
+		Path:          modelActualPath,
+		Size:          0,
+		AttachmentId:  "",
+		RepoId:        repoId,
+		UserId:        ctx.User.ID,
+		Engine:        int64(engine),
+		TrainTaskInfo: "",
+		Accuracy:      "",
+		Status:        STATUS_FINISHED,
+	}
+
+	err := models.SaveModelToDb(model)
+	if err != nil {
+		re["msg"] = err.Error()
+		ctx.JSON(200, re)
+		return
+	}
+	if len(lastNewModelId) > 0 {
+		//update status and version count
+		models.ModifyModelNewProperty(lastNewModelId, MODEL_NOT_LATEST, 0)
+	}
+	var units []models.RepoUnit
+	var deleteUnitTypes []models.UnitType
+	units = append(units, models.RepoUnit{
+		RepoID: ctx.Repo.Repository.ID,
+		Type:   models.UnitTypeModelManage,
+		Config: &models.ModelManageConfig{
+			EnableModelManage: true,
+		},
+	})
+	deleteUnitTypes = append(deleteUnitTypes, models.UnitTypeModelManage)
+
+	models.UpdateRepositoryUnits(ctx.Repo.Repository, units, deleteUnitTypes)
+
+	log.Info("save model end.")
+	notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, id, name, models.ActionCreateNewModelTask)
+	re["code"] = "0"
+	re["id"] = id
+	ctx.JSON(200, re)
+}
+
+func getSize(files []storage.FileInfo) int64 {
+	var size int64
+	for _, file := range files {
+		size += file.Size
+	}
+	return size
+}
+
+func UpdateModelSize(modeluuid string) {
+	model, err := models.QueryModelById(modeluuid)
+	if err == nil {
+		if model.Type == models.TypeCloudBrainOne {
+			if strings.HasPrefix(model.Path, setting.Attachment.Minio.Bucket+"/"+Model_prefix) {
+				files, err := storage.GetAllObjectByBucketAndPrefixMinio(setting.Attachment.Minio.Bucket, model.Path[len(setting.Attachment.Minio.Bucket)+1:])
+				if err != nil {
+					log.Info("Failed to query model size from minio. id=" + modeluuid)
+					return
+				}
+				size := getSize(files)
+				models.ModifyModelSize(modeluuid, size)
+			}
+		} else if model.Type == models.TypeCloudBrainTwo {
+			if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) {
+				files, err := storage.GetAllObjectByBucketAndPrefix(setting.Bucket, model.Path[len(setting.Bucket)+1:])
+				if err != nil {
+					log.Info("Failed to query model size from obs. id=" + modeluuid)
+					return
+				}
+				size := getSize(files)
+				models.ModifyModelSize(modeluuid, size)
+			}
+		}
+	} else {
+		log.Info("not found model, uuid=" + modeluuid)
+	}
+}
+
 func SaveModel(ctx *context.Context) {
 	if !ctx.Repo.CanWrite(models.UnitTypeModelManage) {
 		ctx.Error(403, ctx.Tr("repo.model_noright"))
 		return
 	}
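The handlers above pin down the bucket layout for local models: every uploaded file lands under `aimodels/<uuid[0:1]>/<uuid[1:2]>/<uuid>/`, and `UpdateModelSize` re-derives a model's total size by listing that prefix and summing object sizes. A self-contained sketch of the two helpers, with `FileInfo` standing in for the repository's `storage.FileInfo`:

```go
// Minimal sketch of the storage layout used by the local-model handlers.
package modelpath

import (
	"path"
	"strings"
)

// FileInfo is a stand-in for storage.FileInfo, which carries the object size.
type FileInfo struct {
	FileName string
	Size     int64
}

const modelPrefix = "aimodels/"

// ObjectName mirrors the layout used by SaveLocalModel and the model uploader:
// aimodels/<uuid[0:1]>/<uuid[1:2]>/<uuid>/<fileName>.
func ObjectName(fileName, modelUUID string) string {
	return strings.TrimPrefix(path.Join(modelPrefix, modelUUID[0:1], modelUUID[1:2], modelUUID, fileName), "/")
}

// TotalSize mirrors getSize: a model's size is the sum of its objects' sizes.
func TotalSize(files []FileInfo) int64 {
	var total int64
	for _, f := range files {
		total += f.Size
	}
	return total
}
```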
@@ -292,6 +432,60 @@ func downloadModelFromCloudBrainOne(modelUUID string, jobName string, parentDir
 		return "", 0, nil
 	}
 }
+
+func DeleteModelFile(ctx *context.Context) {
+	log.Info("delete model start.")
+	id := ctx.Query("id")
+	fileName := ctx.Query("fileName")
+	model, err := models.QueryModelById(id)
+	if err == nil {
+		if model.ModelType == MODEL_LOCAL_TYPE {
+			if model.Type == models.TypeCloudBrainOne {
+				bucketName := setting.Attachment.Minio.Bucket
+				objectName := model.Path[len(bucketName)+1:] + fileName
+				log.Info("delete bucket=" + bucketName + " path=" + objectName)
+				if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) {
+					totalSize := storage.MinioGetFilesSize(bucketName, []string{objectName})
+					err := storage.Attachments.DeleteDir(objectName)
+					if err != nil {
+						log.Info("Failed to delete model. id=" + id)
+						re := map[string]string{
+							"code": "-1",
+						}
+						re["msg"] = err.Error()
+						ctx.JSON(200, re)
+						return
+					} else {
+						log.Info("delete minio file size is:" + fmt.Sprint(totalSize))
+						models.ModifyModelSize(id, model.Size-totalSize)
+					}
+				}
+			} else if model.Type == models.TypeCloudBrainTwo {
+				bucketName := setting.Bucket
+				objectName := model.Path[len(setting.Bucket)+1:] + fileName
+				log.Info("delete bucket=" + setting.Bucket + " path=" + objectName)
+				if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) {
+					totalSize := storage.ObsGetFilesSize(bucketName, []string{objectName})
+					err := storage.ObsRemoveObject(bucketName, objectName)
+					if err != nil {
+						log.Info("Failed to delete model. id=" + id)
+						re := map[string]string{
+							"code": "-1",
+						}
+						re["msg"] = err.Error()
+						ctx.JSON(200, re)
+						return
+					} else {
+						log.Info("delete obs file size is:" + fmt.Sprint(totalSize))
+						models.ModifyModelSize(id, model.Size-totalSize)
+					}
+				}
+			}
+		}
+	}
+	ctx.JSON(200, map[string]string{
+		"code": "0",
+	})
+}
 
 func DeleteModel(ctx *context.Context) {
 	log.Info("delete model start.")
@@ -317,14 +511,28 @@ func deleteModelByID(ctx *context.Context, id string) error {
 		return errors.New(ctx.Tr("repo.model_noright"))
 	}
 	if err == nil {
-		log.Info("bucket=" + setting.Bucket + " path=" + model.Path)
-		if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) {
-			err := storage.ObsRemoveObject(setting.Bucket, model.Path[len(setting.Bucket)+1:])
-			if err != nil {
-				log.Info("Failed to delete model. id=" + id)
-				return err
+
+		if model.Type == models.TypeCloudBrainOne {
+			bucketName := setting.Attachment.Minio.Bucket
+			log.Info("bucket=" + bucketName + " path=" + model.Path)
+			if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) {
+				err := storage.Attachments.DeleteDir(model.Path[len(bucketName)+1:])
+				if err != nil {
+					log.Info("Failed to delete model. id=" + id)
+					return err
+				}
+			}
+		} else if model.Type == models.TypeCloudBrainTwo {
+			log.Info("bucket=" + setting.Bucket + " path=" + model.Path)
+			if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) {
+				err := storage.ObsRemoveObject(setting.Bucket, model.Path[len(setting.Bucket)+1:])
+				if err != nil {
+					log.Info("Failed to delete model.
id=" + id) + return err + } } } + err = models.DeleteModelById(id) if err == nil { //find a model to change new aimodels := models.QueryModelByName(model.Name, model.RepoId) @@ -884,29 +1092,58 @@ func ModifyModel(id string, description string) error { func ModifyModelInfo(ctx *context.Context) { log.Info("modify model start.") id := ctx.Query("id") - description := ctx.Query("description") - + re := map[string]string{ + "code": "-1", + } task, err := models.QueryModelById(id) if err != nil { + re["msg"] = err.Error() log.Error("no such model!", err.Error()) - ctx.ServerError("no such model:", err) + ctx.JSON(200, re) return } if !isOper(ctx, task.UserId) { - ctx.NotFound(ctx.Req.URL.RequestURI(), nil) - //ctx.ServerError("no right.", errors.New(ctx.Tr("repo.model_noright"))) + re["msg"] = "No right to operation." + ctx.JSON(200, re) return } + if task.ModelType == MODEL_LOCAL_TYPE { + name := ctx.Query("name") + label := ctx.Query("label") + description := ctx.Query("description") + engine := ctx.QueryInt("engine") + aimodels := models.QueryModelByName(name, task.RepoId) + if aimodels != nil && len(aimodels) > 0 { + if len(aimodels) == 1 { + if aimodels[0].ID != task.ID { + re["msg"] = ctx.Tr("repo.model.manage.create_error") + ctx.JSON(200, re) + return + } + } else { + re["msg"] = ctx.Tr("repo.model.manage.create_error") + ctx.JSON(200, re) + return + } + } + err = models.ModifyLocalModel(id, name, label, description, engine) - err = ModifyModel(id, description) + } else { + label := ctx.Query("label") + description := ctx.Query("description") + engine := task.Engine + name := task.Name + err = models.ModifyLocalModel(id, name, label, description, int(engine)) + } if err != nil { - log.Info("modify error," + err.Error()) - ctx.ServerError("error.", err) + re["msg"] = err.Error() + ctx.JSON(200, re) + return } else { - ctx.JSON(200, "success") + re["code"] = "0" + ctx.JSON(200, re) } - } func QueryModelListForPredict(ctx *context.Context) { @@ -1004,3 +1241,25 @@ func QueryOneLevelModelFile(ctx *context.Context) { ctx.JSON(http.StatusOK, fileinfos) } } + +func CreateLocalModel(ctx *context.Context) { + ctx.Data["isModelManage"] = true + ctx.Data["ModelManageAccess"] = ctx.Repo.CanWrite(models.UnitTypeModelManage) + + ctx.HTML(200, tplCreateLocalModelInfo) +} + +func CreateLocalModelForUpload(ctx *context.Context) { + ctx.Data["uuid"] = ctx.Query("uuid") + ctx.Data["isModelManage"] = true + ctx.Data["ModelManageAccess"] = ctx.Repo.CanWrite(models.UnitTypeModelManage) + ctx.Data["max_model_size"] = setting.MaxModelSize * MODEL_MAX_SIZE + ctx.HTML(200, tplCreateLocalForUploadModelInfo) +} + +func CreateOnlineModel(ctx *context.Context) { + ctx.Data["isModelManage"] = true + ctx.Data["ModelManageAccess"] = ctx.Repo.CanWrite(models.UnitTypeModelManage) + + ctx.HTML(200, tplCreateOnlineModelInfo) +} diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index dc2c417e4..240e78acc 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -11,6 +11,7 @@ import ( "fmt" "mime/multipart" "net/http" + "path" "strconv" "strings" @@ -311,7 +312,8 @@ func GetAttachment(ctx *context.Context) { url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name log.Info("return url=" + url) } else { - url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(attach.UUID[0:1], attach.UUID[1:2], attach.UUID, attach.Name)), "/") + url, err = 
storage.ObsGetPreSignedUrl(objectName, attach.Name) if err != nil { ctx.ServerError("ObsGetPreSignedUrl", err) return @@ -415,7 +417,7 @@ func AddAttachment(ctx *context.Context) { uuid := ctx.Query("uuid") has := false if typeCloudBrain == models.TypeCloudBrainOne { - has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) + has, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(uuid)) if err != nil { ctx.ServerError("HasObject", err) return @@ -557,7 +559,7 @@ func GetSuccessChunks(ctx *context.Context) { isExist := false if typeCloudBrain == models.TypeCloudBrainOne { - isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) + isExist, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(fileChunk.UUID)) if err != nil { ctx.ServerError("HasObject failed", err) return @@ -593,12 +595,12 @@ func GetSuccessChunks(ctx *context.Context) { } if typeCloudBrain == models.TypeCloudBrainOne { - chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) + chunks, err = storage.GetPartInfos(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), fileChunk.UploadID) if err != nil { log.Error("GetPartInfos failed:%v", err.Error()) } } else { - chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName) + chunks, err = storage.GetObsPartInfos(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), fileChunk.UploadID) if err != nil { log.Error("GetObsPartInfos failed:%v", err.Error()) } @@ -699,13 +701,13 @@ func NewMultipart(ctx *context.Context) { uuid := gouuid.NewV4().String() var uploadID string if typeCloudBrain == models.TypeCloudBrainOne { - uploadID, err = storage.NewMultiPartUpload(uuid) + uploadID, err = storage.NewMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")) if err != nil { ctx.ServerError("NewMultipart", err) return } } else { - uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName) + uploadID, err = storage.NewObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")) if err != nil { ctx.ServerError("NewObsMultiPartUpload", err) return @@ -749,8 +751,8 @@ func PutOBSProxyUpload(ctx *context.Context) { ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody)) return } - - err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser()) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + err := storage.ObsMultiPartUpload(objectName, uploadID, partNumber, fileName, RequestBody.ReadCloser()) if err != nil { log.Info("upload error.") } @@ -759,8 +761,8 @@ func PutOBSProxyUpload(ctx *context.Context) { func GetOBSProxyDownload(ctx *context.Context) { uuid := ctx.Query("uuid") fileName := ctx.Query("file_name") - - body, err := storage.ObsDownload(uuid, fileName) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + body, err := storage.ObsDownloadAFile(setting.Bucket, objectName) if err != nil { log.Info("upload error.") } else { @@ -805,7 +807,7 @@ func GetMultipartUploadUrl(ctx *context.Context) { return } - url, err = 
storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + url, err = storage.GenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/"), uploadID, partNumber, size) if err != nil { ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) return @@ -815,7 +817,7 @@ func GetMultipartUploadUrl(ctx *context.Context) { url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName log.Info("return url=" + url) } else { - url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName) + url, err = storage.ObsGenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/"), uploadID, partNumber) if err != nil { ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err)) return @@ -823,7 +825,6 @@ func GetMultipartUploadUrl(ctx *context.Context) { log.Info("url=" + url) } } - ctx.JSON(200, map[string]string{ "url": url, }) @@ -855,13 +856,13 @@ func CompleteMultipart(ctx *context.Context) { } if typeCloudBrain == models.TypeCloudBrainOne { - _, err = storage.CompleteMultiPartUpload(uuid, uploadID, fileChunk.TotalChunks) + _, err = storage.CompleteMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), uploadID, fileChunk.TotalChunks) if err != nil { ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) return } } else { - err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName, fileChunk.TotalChunks) + err = storage.CompleteObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), uploadID, fileChunk.TotalChunks) if err != nil { ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err)) return @@ -1013,7 +1014,7 @@ func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) { } for _, attch := range attachs { - has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID)) + has, err := storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(attch.UUID)) if err != nil || !has { continue } diff --git a/routers/repo/attachment_model.go b/routers/repo/attachment_model.go new file mode 100644 index 000000000..efc7cbe08 --- /dev/null +++ b/routers/repo/attachment_model.go @@ -0,0 +1,323 @@ +package repo + +import ( + "fmt" + "path" + "strconv" + "strings" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/minio_ext" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/storage" + "code.gitea.io/gitea/modules/upload" + gouuid "github.com/satori/go.uuid" +) + +func GetModelChunks(ctx *context.Context) { + fileMD5 := ctx.Query("md5") + typeCloudBrain := ctx.QueryInt("type") + fileName := ctx.Query("file_name") + scene := ctx.Query("scene") + modeluuid := ctx.Query("modeluuid") + log.Info("scene=" + scene + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain)) + var chunks string + + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + + fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain, modeluuid) + if err != 
nil { + if models.IsErrFileChunkNotExist(err) { + ctx.JSON(200, map[string]string{ + "uuid": "", + "uploaded": "0", + "uploadID": "", + "chunks": "", + }) + } else { + ctx.ServerError("GetFileChunkByMD5", err) + } + return + } + + isExist := false + if typeCloudBrain == models.TypeCloudBrainOne { + isExist, err = storage.Attachments.HasObject(fileChunk.ObjectName) + if isExist { + log.Info("The file is exist in minio. has uploaded.path=" + fileChunk.ObjectName) + } else { + log.Info("The file is not exist in minio..") + } + if err != nil { + ctx.ServerError("HasObject failed", err) + return + } + } else { + isExist, err = storage.ObsHasObject(fileChunk.ObjectName) + if isExist { + log.Info("The file is exist in obs. has uploaded. path=" + fileChunk.ObjectName) + } else { + log.Info("The file is not exist in obs.") + } + if err != nil { + ctx.ServerError("ObsHasObject failed", err) + return + } + } + + if isExist { + if fileChunk.IsUploaded == models.FileNotUploaded { + log.Info("the file has been uploaded but not recorded") + fileChunk.IsUploaded = models.FileUploaded + if err = models.UpdateModelFileChunk(fileChunk); err != nil { + log.Error("UpdateFileChunk failed:", err.Error()) + } + } + modelname := "" + model, err := models.QueryModelById(modeluuid) + if err == nil && model != nil { + modelname = model.Name + } + ctx.JSON(200, map[string]string{ + "uuid": fileChunk.UUID, + "uploaded": strconv.Itoa(fileChunk.IsUploaded), + "uploadID": fileChunk.UploadID, + "chunks": string(chunks), + "attachID": "0", + "modeluuid": modeluuid, + "fileName": fileName, + "modelName": modelname, + }) + } else { + if fileChunk.IsUploaded == models.FileUploaded { + log.Info("the file has been recorded but not uploaded") + fileChunk.IsUploaded = models.FileNotUploaded + if err = models.UpdateModelFileChunk(fileChunk); err != nil { + log.Error("UpdateFileChunk failed:", err.Error()) + } + } + + if typeCloudBrain == models.TypeCloudBrainOne { + chunks, err = storage.GetPartInfos(fileChunk.ObjectName, fileChunk.UploadID) + if err != nil { + log.Error("GetPartInfos failed:%v", err.Error()) + } + } else { + chunks, err = storage.GetObsPartInfos(fileChunk.ObjectName, fileChunk.UploadID) + if err != nil { + log.Error("GetObsPartInfos failed:%v", err.Error()) + } + } + if err != nil { + models.DeleteModelFileChunk(fileChunk) + ctx.JSON(200, map[string]string{ + "uuid": "", + "uploaded": "0", + "uploadID": "", + "chunks": "", + }) + } else { + ctx.JSON(200, map[string]string{ + "uuid": fileChunk.UUID, + "uploaded": strconv.Itoa(fileChunk.IsUploaded), + "uploadID": fileChunk.UploadID, + "chunks": string(chunks), + "attachID": "0", + "datasetID": "0", + "fileName": "", + "datasetName": "", + }) + } + } +} + +func getObjectName(filename string, modeluuid string) string { + return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/") +} + +func NewModelMultipart(ctx *context.Context) { + if !setting.Attachment.Enabled { + ctx.Error(404, "attachment is not enabled") + return + } + fileName := ctx.Query("file_name") + modeluuid := ctx.Query("modeluuid") + + err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ",")) + if err != nil { + ctx.Error(400, err.Error()) + return + } + + typeCloudBrain := ctx.QueryInt("type") + err = checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + + if setting.Attachment.StoreType == storage.MinioStorageType { + 
+		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
+		if totalChunkCounts > minio_ext.MaxPartsCount {
+			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
+			return
+		}
+
+		fileSize := ctx.QueryInt64("size")
+		if fileSize > minio_ext.MaxMultipartPutObjectSize {
+			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
+			return
+		}
+
+		uuid := gouuid.NewV4().String()
+		var uploadID string
+		var objectName string
+		if typeCloudBrain == models.TypeCloudBrainOne {
+			objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/")
+			uploadID, err = storage.NewMultiPartUpload(objectName)
+			if err != nil {
+				ctx.ServerError("NewMultipart", err)
+				return
+			}
+		} else {
+
+			objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/")
+			uploadID, err = storage.NewObsMultiPartUpload(objectName)
+			if err != nil {
+				ctx.ServerError("NewObsMultiPartUpload", err)
+				return
+			}
+		}
+
+		_, err = models.InsertModelFileChunk(&models.ModelFileChunk{
+			UUID:        uuid,
+			UserID:      ctx.User.ID,
+			UploadID:    uploadID,
+			Md5:         ctx.Query("md5"),
+			Size:        fileSize,
+			ObjectName:  objectName,
+			ModelUUID:   modeluuid,
+			TotalChunks: totalChunkCounts,
+			Type:        typeCloudBrain,
+		})
+
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
+			return
+		}
+
+		ctx.JSON(200, map[string]string{
+			"uuid":     uuid,
+			"uploadID": uploadID,
+		})
+	} else {
+		ctx.Error(404, "storage type is not enabled")
+		return
+	}
+}
+
+func GetModelMultipartUploadUrl(ctx *context.Context) {
+	uuid := ctx.Query("uuid")
+	uploadID := ctx.Query("uploadID")
+	partNumber := ctx.QueryInt("chunkNumber")
+	size := ctx.QueryInt64("size")
+	typeCloudBrain := ctx.QueryInt("type")
+	err := checkTypeCloudBrain(typeCloudBrain)
+	if err != nil {
+		ctx.ServerError("checkTypeCloudBrain failed", err)
+		return
+	}
+	fileChunk, err := models.GetModelFileChunkByUUID(uuid)
+	if err != nil {
+		if models.IsErrFileChunkNotExist(err) {
+			ctx.Error(404)
+		} else {
+			ctx.ServerError("GetFileChunkByUUID", err)
+		}
+		return
+	}
+	url := ""
+	if typeCloudBrain == models.TypeCloudBrainOne {
+		if size > minio_ext.MinPartSize {
+			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
+			return
+		}
+		url, err = storage.GenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber, size)
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
+			return
+		}
+	} else {
+		url, err = storage.ObsGenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber)
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
+			return
+		}
+		log.Info("url=" + url)
+
+	}
+
+	ctx.JSON(200, map[string]string{
+		"url": url,
+	})
+}
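Together with the `/attachments/model` routes registered later in this patch (routers/routes/routes.go), these handlers form a four-step upload handshake: `get_chunks` checks for a resumable upload, `new_multipart` opens one, `get_multipart_url` hands out a presigned PUT URL per chunk, and `complete_multipart` merges the parts. A minimal client sketch against those routes — query parameters mirror the handlers above, error handling is trimmed, the resume check is omitted, and the `fileType` value is a placeholder for whatever `upload.VerifyFileType` accepts:

```go
// Sketch of the four-step model upload handshake; not the project's official client.
package modelupload

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// newMultipartResp matches the JSON written by NewModelMultipart.
type newMultipartResp struct {
	UUID     string `json:"uuid"`
	UploadID string `json:"uploadID"`
}

// UploadModelFile drives one file through new_multipart -> get_multipart_url -> complete_multipart.
// base is the repository URL prefix; chunks are pre-split by the caller.
func UploadModelFile(base, modelUUID, fileName, md5 string, chunks [][]byte) error {
	// 1. open a multipart upload (fileType is a placeholder value)
	u := fmt.Sprintf("%s/attachments/model/new_multipart?type=0&modeluuid=%s&file_name=%s&md5=%s&size=%d&totalChunkCounts=%d&fileType=model",
		base, modelUUID, fileName, md5, totalLen(chunks), len(chunks))
	resp, err := http.Get(u)
	if err != nil {
		return err
	}
	var opened newMultipartResp
	json.NewDecoder(resp.Body).Decode(&opened)
	resp.Body.Close()

	// 2. fetch a presigned URL per chunk and PUT the chunk bytes to it
	for i, chunk := range chunks {
		q := fmt.Sprintf("%s/attachments/model/get_multipart_url?type=0&uuid=%s&uploadID=%s&chunkNumber=%d&size=%d",
			base, opened.UUID, opened.UploadID, i+1, len(chunk))
		r, err := http.Get(q)
		if err != nil {
			return err
		}
		var target struct {
			URL string `json:"url"`
		}
		json.NewDecoder(r.Body).Decode(&target)
		r.Body.Close()
		req, _ := http.NewRequest(http.MethodPut, target.URL, bytes.NewReader(chunk))
		if _, err := http.DefaultClient.Do(req); err != nil {
			return err
		}
	}

	// 3. ask the server to merge the parts and mark the chunk record uploaded
	done := fmt.Sprintf("%s/attachments/model/complete_multipart?type=0&uuid=%s&uploadID=%s&modeluuid=%s",
		base, opened.UUID, opened.UploadID, modelUUID)
	_, err = http.Post(done, "application/json", nil)
	return err
}

func totalLen(chunks [][]byte) int {
	n := 0
	for _, c := range chunks {
		n += len(c)
	}
	return n
}
```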
+func CompleteModelMultipart(ctx *context.Context) {
+	uuid := ctx.Query("uuid")
+	uploadID := ctx.Query("uploadID")
+	typeCloudBrain := ctx.QueryInt("type")
+	modeluuid := ctx.Query("modeluuid")
+	log.Warn("uuid:" + uuid)
+	log.Warn("modeluuid:" + modeluuid)
+	log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))
+
+	err := checkTypeCloudBrain(typeCloudBrain)
+	if err != nil {
+		ctx.ServerError("checkTypeCloudBrain failed", err)
+		return
+	}
+	fileChunk, err := models.GetModelFileChunkByUUID(uuid)
+	if err != nil {
+		if models.IsErrFileChunkNotExist(err) {
+			ctx.Error(404)
+		} else {
+			ctx.ServerError("GetFileChunkByUUID", err)
+		}
+		return
+	}
+
+	if typeCloudBrain == models.TypeCloudBrainOne {
+		_, err = storage.CompleteMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
+			return
+		}
+	} else {
+		err = storage.CompleteObsMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
+			return
+		}
+	}
+
+	fileChunk.IsUploaded = models.FileUploaded
+
+	err = models.UpdateModelFileChunk(fileChunk)
+	if err != nil {
+		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
+		return
+	}
+	// update the model's total size now that all chunks are merged
+	UpdateModelSize(modeluuid)
+
+	ctx.JSON(200, map[string]string{
+		"result_code": "0",
+	})
+
+}
diff --git a/routers/repo/cloudbrain_statistic.go b/routers/repo/cloudbrain_statistic.go
index 3814c2daf..4476b3e45 100644
--- a/routers/repo/cloudbrain_statistic.go
+++ b/routers/repo/cloudbrain_statistic.go
@@ -1,38 +1,87 @@
 package repo
 
 import (
+	"net/http"
 	"strings"
 	"time"
 
 	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/timeutil"
+	cloudbrainService "code.gitea.io/gitea/services/cloudbrain"
 )
 
 func CloudbrainDurationStatisticHour() {
-
-	dateTime := time.Now().Format("2006-01-02 15:04:05")
-	dayTime := time.Now().Format("2006-01-02")
+	var statisticTime time.Time
+	var count int64
+	recordDurationUpdateTime, err := models.GetDurationRecordUpdateTime()
+	if err != nil {
+		log.Error("Can not get GetDurationRecordBeginTime", err)
+	}
 	now := time.Now()
 	currentTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
+	if err == nil && len(recordDurationUpdateTime) > 0 {
+		statisticTime = time.Unix(int64(recordDurationUpdateTime[0].DateTime), 0).Add(+1 * time.Hour)
+	} else {
+		statisticTime = currentTime
+	}
+	deleteBeginTime := currentTime
+	if len(recordDurationUpdateTime) > 0 {
+		deleteBeginTime = time.Unix(int64(recordDurationUpdateTime[0].DateTime), 0)
+	}
+
+	err = models.DeleteCloudbrainDurationStatistic(timeutil.TimeStamp(deleteBeginTime.Unix()), timeutil.TimeStamp(currentTime.Unix()))
+	if err != nil {
+		log.Error("DeleteCloudbrainDurationStatistic failed", err)
+	}
+
+	for statisticTime.Before(currentTime) || statisticTime.Equal(currentTime) {
+		countEach := summaryDurationStat(statisticTime)
+		count += countEach
+		statisticTime = statisticTime.Add(+1 * time.Hour)
+	}
+	log.Info("summaryDurationStat count: %v", count)
+}
+func UpdateDurationStatisticHistoryData(beginTime time.Time, endTime time.Time) int64 {
+	var count int64
+	statisticTime := beginTime
+	currentTime := endTime
+	for statisticTime.Before(currentTime) || statisticTime.Equal(currentTime) {
+		countEach := summaryDurationStat(statisticTime)
+		count += countEach
+		statisticTime = statisticTime.Add(+1 * time.Hour)
+	}
+	return count
+}
 
-	m, _ := time.ParseDuration("-1h")
-	beginTime := currentTime.Add(m).Unix()
-	endTime := currentTime.Unix()
-	hourTime := currentTime.Add(m).Hour()
+// statisticTime is the current time truncated to the hour: if now is
+// 2019-01-01 12:01:01, statisticTime is 2019-01-01 12:00:00.
+func summaryDurationStat(statisticTime time.Time) int64 {
+	var count int64
+	dateTime := timeutil.TimeStamp(statisticTime.Add(-1 * time.Hour).Unix())
+	beginTime := statisticTime.Add(-1 * time.Hour).Unix()
+	dayTime := statisticTime.Add(-1 * time.Hour).Format("2006-01-02")
+	hourTime := statisticTime.Add(-1 * time.Hour).Hour()
+	endTime := statisticTime.Unix()
 	ciTasks, err := models.GetCloudbrainByTime(beginTime, endTime)
 	if err != nil {
 		log.Info("GetCloudbrainByTime err: %v", err)
-		return
+		return 0
 	}
-	specMap :=
make(map[string]*models.Specification) + cloudbrainMap := make(map[string]*models.Cloudbrain) models.LoadSpecs4CloudbrainInfo(ciTasks) + for _, cloudbrain := range ciTasks { - if _, ok := specMap[cloudbrain.Cloudbrain.Spec.AiCenterCode+"/"+cloudbrain.Cloudbrain.Spec.AccCardType]; !ok { - if cloudbrain.Cloudbrain.Spec != nil { - specMap[cloudbrain.Cloudbrain.Spec.AiCenterCode+"/"+cloudbrain.Cloudbrain.Spec.AccCardType] = cloudbrain.Cloudbrain.Spec + if cloudbrain.Cloudbrain.StartTime == 0 { + cloudbrain.Cloudbrain.StartTime = cloudbrain.Cloudbrain.CreatedUnix + } + if cloudbrain.Cloudbrain.EndTime == 0 { + cloudbrain.Cloudbrain.EndTime = cloudbrain.Cloudbrain.UpdatedUnix + } + cloudbrain = cloudbrainService.UpdateCloudbrainAiCenter(cloudbrain) + if cloudbrain.Cloudbrain.Spec != nil { + if _, ok := cloudbrainMap[cloudbrain.Cloudbrain.AiCenter+"/"+cloudbrain.Cloudbrain.Spec.AccCardType]; !ok { + if cloudbrain.Cloudbrain.Spec != nil { + cloudbrainMap[cloudbrain.Cloudbrain.AiCenter+"/"+cloudbrain.Cloudbrain.Spec.AccCardType] = &cloudbrain.Cloudbrain + } } } } @@ -42,69 +91,83 @@ func CloudbrainDurationStatisticHour() { resourceQueues, err := models.GetCanUseCardInfo() if err != nil { log.Info("GetCanUseCardInfo err: %v", err) - return + return 0 } + cardsTotalDurationMap := make(map[string]int) for _, resourceQueue := range resourceQueues { - cardsTotalDurationMap[resourceQueue.Cluster+"/"+resourceQueue.AiCenterName+"/"+resourceQueue.AiCenterCode+"/"+resourceQueue.AccCardType+"/"+resourceQueue.ComputeResource] = resourceQueue.CardsTotalNum * 1 * 60 * 60 + if _, ok := cardsTotalDurationMap[resourceQueue.Cluster+"/"+resourceQueue.AiCenterCode+"/"+resourceQueue.AccCardType]; !ok { + cardsTotalDurationMap[resourceQueue.Cluster+"/"+resourceQueue.AiCenterCode+"/"+resourceQueue.AccCardType] = resourceQueue.CardsTotalNum * 1 * 60 * 60 + } else { + cardsTotalDurationMap[resourceQueue.Cluster+"/"+resourceQueue.AiCenterCode+"/"+resourceQueue.AccCardType] += resourceQueue.CardsTotalNum * 1 * 60 * 60 + } } - for centerCode, CardTypeInfo := range cloudBrainCenterCodeAndCardTypeInfo { - for cardType, cardDuration := range CardTypeInfo { - spec := specMap[centerCode+"/"+cardType] - if spec != nil { - if err := models.DeleteCloudbrainDurationStatisticHour(dayTime, hourTime, centerCode, cardType); err != nil { - log.Error("DeleteCloudbrainDurationStatisticHour failed: %v", err.Error()) - return - } - if _, ok := cardsTotalDurationMap[spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource]; !ok { - cardsTotalDurationMap[spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource] = 0 + for centerCode, CardTypes := range cloudBrainCenterCodeAndCardTypeInfo { + for cardType, cardDuration := range CardTypes { + cloudbrainTable := cloudbrainMap[centerCode+"/"+cardType] + if cloudbrainTable != nil { + if _, ok := cardsTotalDurationMap[cloudbrainTable.Cluster+"/"+centerCode+"/"+cardType]; !ok { + cardsTotalDurationMap[cloudbrainTable.Cluster+"/"+centerCode+"/"+cardType] = 0 } cloudbrainDurationStat := models.CloudbrainDurationStatistic{ DateTime: dateTime, DayTime: dayTime, HourTime: hourTime, - Cluster: spec.Cluster, - AiCenterName: spec.AiCenterName, + Cluster: cloudbrainTable.Cluster, + AiCenterName: GetAiCenterNameByCode(centerCode, "zh-CN"), AiCenterCode: centerCode, AccCardType: cardType, - ComputeResource: spec.ComputeResource, CardsUseDuration: cardDuration, - CardsTotalDuration: 
cardsTotalDurationMap[spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource], + CardsTotalDuration: cardsTotalDurationMap[cloudbrainTable.Cluster+"/"+centerCode+"/"+cardType], CreatedUnix: timeutil.TimeStampNow(), } if _, err = models.InsertCloudbrainDurationStatistic(&cloudbrainDurationStat); err != nil { log.Error("Insert cloudbrainDurationStat failed: %v", err.Error()) } - delete(cardsTotalDurationMap, spec.Cluster+"/"+spec.AiCenterName+"/"+centerCode+"/"+cardType+"/"+spec.ComputeResource) + count++ + delete(cardsTotalDurationMap, cloudbrainTable.Cluster+"/"+centerCode+"/"+cardType) } } } for key, cardsTotalDuration := range cardsTotalDurationMap { - if err := models.DeleteCloudbrainDurationStatisticHour(dayTime, hourTime, strings.Split(key, "/")[2], strings.Split(key, "/")[3]); err != nil { - log.Error("DeleteCloudbrainDurationStatisticHour failed: %v", err.Error()) - return - } cloudbrainDurationStat := models.CloudbrainDurationStatistic{ DateTime: dateTime, DayTime: dayTime, HourTime: hourTime, Cluster: strings.Split(key, "/")[0], - AiCenterName: strings.Split(key, "/")[1], - AiCenterCode: strings.Split(key, "/")[2], - AccCardType: strings.Split(key, "/")[3], - ComputeResource: strings.Split(key, "/")[4], + AiCenterName: GetAiCenterNameByCode(strings.Split(key, "/")[1], "zh-CN"), + AiCenterCode: strings.Split(key, "/")[1], + AccCardType: strings.Split(key, "/")[2], CardsUseDuration: 0, CardsTotalDuration: cardsTotalDuration, + CardsTotalNum: cardsTotalDuration / 1 / 60 / 60, CreatedUnix: timeutil.TimeStampNow(), } if _, err = models.InsertCloudbrainDurationStatistic(&cloudbrainDurationStat); err != nil { log.Error("Insert cloudbrainDurationStat failed: %v", err.Error()) } + count++ } log.Info("finish summary cloudbrainDurationStat") + return count +} + +func GetAiCenterNameByCode(centerCode string, language string) string { + var aiCenterName string + aiCenterInfo := cloudbrainService.GetAiCenterInfoByCenterCode(centerCode) + if aiCenterInfo != nil { + if language == "zh-CN" { + aiCenterName = aiCenterInfo.Content + } else { + aiCenterName = aiCenterInfo.ContentEN + } + } else { + aiCenterName = centerCode + } + return aiCenterName } func getcloudBrainCenterCodeAndCardTypeInfo(ciTasks []*models.CloudbrainInfo, beginTime int64, endTime int64) map[string]map[string]int { @@ -112,7 +175,7 @@ func getcloudBrainCenterCodeAndCardTypeInfo(ciTasks []*models.CloudbrainInfo, be var AccCardsNum int cloudBrainCenterCodeAndCardType := make(map[string]map[string]int) for _, cloudbrain := range ciTasks { - + cloudbrain = cloudbrainService.UpdateCloudbrainAiCenter(cloudbrain) if cloudbrain.Cloudbrain.StartTime == 0 { cloudbrain.Cloudbrain.StartTime = cloudbrain.Cloudbrain.CreatedUnix } @@ -129,41 +192,70 @@ func getcloudBrainCenterCodeAndCardTypeInfo(ciTasks []*models.CloudbrainInfo, be } else { AccCardsNum = cloudbrain.Cloudbrain.Spec.AccCardsNum } - if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode]; !ok { - cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode] = make(map[string]int) + if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter]; !ok { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter] = make(map[string]int) } - - if cloudbrain.Cloudbrain.Status == string(models.ModelArtsRunning) { - if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType]; !ok { - if int64(cloudbrain.Cloudbrain.StartTime) < 
beginTime { - cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime)) + if cloudbrain.Cloudbrain.Spec != nil { + if cloudbrain.Cloudbrain.Status == string(models.ModelArtsRunning) { + if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType]; !ok { + if int64(cloudbrain.Cloudbrain.StartTime) < beginTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime)) + } else if beginTime <= int64(cloudbrain.Cloudbrain.StartTime) && int64(cloudbrain.Cloudbrain.StartTime) < endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime)) + } else if int64(cloudbrain.Cloudbrain.StartTime) >= endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = 0 + } } else { - cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime)) + if int64(cloudbrain.Cloudbrain.StartTime) < beginTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime)) + } else if beginTime <= int64(cloudbrain.Cloudbrain.StartTime) && int64(cloudbrain.Cloudbrain.StartTime) < endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime)) + } else if int64(cloudbrain.Cloudbrain.StartTime) >= endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += 0 + } } } else { - if int64(cloudbrain.Cloudbrain.StartTime) < beginTime { - cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime)) + if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType]; !ok { + if int64(cloudbrain.Cloudbrain.StartTime) <= beginTime && int64(cloudbrain.Cloudbrain.EndTime) <= endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(beginTime)) + } else if int64(cloudbrain.Cloudbrain.StartTime) <= beginTime && int64(cloudbrain.Cloudbrain.EndTime) > endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime)) + } else if beginTime <= int64(cloudbrain.Cloudbrain.StartTime) && int64(cloudbrain.Cloudbrain.StartTime) <= endTime && int64(cloudbrain.Cloudbrain.EndTime) <= endTime { + cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(cloudbrain.Cloudbrain.StartTime)) + } else if beginTime <= int64(cloudbrain.Cloudbrain.StartTime) && int64(cloudbrain.Cloudbrain.StartTime) 
<= endTime && int64(cloudbrain.Cloudbrain.EndTime) > endTime {
+						cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime))
+					}
+				} else {
+					if int64(cloudbrain.Cloudbrain.StartTime) <= beginTime && int64(cloudbrain.Cloudbrain.EndTime) <= endTime {
+						cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(beginTime))
+					} else if int64(cloudbrain.Cloudbrain.StartTime) <= beginTime && int64(cloudbrain.Cloudbrain.EndTime) > endTime {
+						cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(beginTime))
+					} else if beginTime <= int64(cloudbrain.Cloudbrain.StartTime) && int64(cloudbrain.Cloudbrain.StartTime) <= endTime && int64(cloudbrain.Cloudbrain.EndTime) <= endTime {
+						cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(cloudbrain.Cloudbrain.StartTime))
+					} else if beginTime <= int64(cloudbrain.Cloudbrain.StartTime) && int64(cloudbrain.Cloudbrain.StartTime) <= endTime && int64(cloudbrain.Cloudbrain.EndTime) > endTime {
+						cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.AiCenter][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(endTime) - int(cloudbrain.Cloudbrain.StartTime))
+					}
+				}
+			}
-		} else {
-			if _, ok := cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType]; !ok {
-				if int64(cloudbrain.Cloudbrain.StartTime) < beginTime {
-					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(beginTime))
-				} else {
-					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] = AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(cloudbrain.Cloudbrain.StartTime))
-				}
-			} else {
-				if int64(cloudbrain.Cloudbrain.StartTime) < beginTime {
-					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(beginTime))
-				} else {
-					cloudBrainCenterCodeAndCardType[cloudbrain.Cloudbrain.Spec.AiCenterCode][cloudbrain.Cloudbrain.Spec.AccCardType] += AccCardsNum * WorkServerNumber * (int(cloudbrain.Cloudbrain.EndTime) - int(cloudbrain.Cloudbrain.StartTime))
-				}
-			}
-		}
 	}
 	return cloudBrainCenterCodeAndCardType
 }
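The branchy window accounting in `getcloudBrainCenterCodeAndCardTypeInfo` reduces to clipping each task's `[StartTime, EndTime)` interval against the statistic window `[beginTime, endTime)` and charging `AccCardsNum * WorkServerNumber` card-seconds per overlapping second (running tasks are effectively treated as ending at the window edge). A sketch of that arithmetic as one helper — this is a reading of the code above, not a drop-in replacement:

```go
// Sketch: interval-clipping view of the duration accounting above.
package durationstat

// clampedOverlap returns the length in seconds of the intersection of
// [start, end) with the statistic window [winBegin, winEnd).
func clampedOverlap(start, end, winBegin, winEnd int64) int64 {
	if start < winBegin {
		start = winBegin
	}
	if end > winEnd {
		end = winEnd
	}
	if end <= start {
		return 0
	}
	return end - start
}

// cardSeconds charges accCards*workServers card-seconds for every second the
// task overlaps the window; for still-running tasks pass winEnd as the task end.
func cardSeconds(accCards, workServers int, start, end, winBegin, winEnd int64) int {
	return accCards * workServers * int(clampedOverlap(start, end, winBegin, winEnd))
}
```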
+
+func CloudbrainUpdateHistoryData(ctx *context.Context) {
+	beginTimeStr := ctx.QueryTrim("beginTime")
+	endTimeStr := ctx.QueryTrim("endTime")
+	beginTime, _ := time.ParseInLocation("2006-01-02 15:04:05", beginTimeStr, time.Local)
+	endTime, _ := time.ParseInLocation("2006-01-02 15:04:05", endTimeStr, time.Local)
+	beginTimeUnix := timeutil.TimeStamp(beginTime.Unix())
+	endTimeUnix := timeutil.TimeStamp(endTime.Unix())
+
+	err := models.DeleteCloudbrainDurationStatistic(beginTimeUnix, endTimeUnix)
+	count := UpdateDurationStatisticHistoryData(beginTime, endTime)
+	ctx.JSON(http.StatusOK, map[string]interface{}{
+		"message": 0,
+		"count":   count,
+		"err":     err,
+	})
+}
diff --git a/routers/repo/grampus.go b/routers/repo/grampus.go
index c94bc2a5b..8f3182758 100755
--- a/routers/repo/grampus.go
+++ b/routers/repo/grampus.go
@@ -1,7 +1,6 @@
 package repo
 
 import (
-	"code.gitea.io/gitea/modules/urfs_client/urchin"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -12,6 +11,9 @@ import (
 	"strconv"
 	"strings"
 
+	"code.gitea.io/gitea/modules/urfs_client/urchin"
+
+	"code.gitea.io/gitea/routers/response"
+
 	"code.gitea.io/gitea/services/cloudbrain/cloudbrainTask"
 
 	"code.gitea.io/gitea/modules/dataset"
@@ -861,10 +863,10 @@ func GrampusTrainJobShow(ctx *context.Context) {
 				}
 			}
 		}
-		err = models.UpdateJob(task)
-		if err != nil {
-			log.Error("UpdateJob failed:" + err.Error())
-		}
+	}
+	err = models.UpdateJob(task)
+	if err != nil {
+		log.Error("UpdateJob failed:" + err.Error())
 	}
 }
@@ -936,15 +938,14 @@ func GrampusGetLog(ctx *context.Context) {
 	content, err := grampus.GetTrainJobLog(job.JobID)
 	if err != nil {
 		log.Error("GetTrainJobLog failed: %v", err, ctx.Data["MsgID"])
-		ctx.ServerError(err.Error(), err)
+		ctx.JSON(http.StatusOK, map[string]interface{}{
+			"JobName":        job.JobName,
+			"Content":        "",
+			"CanLogDownload": false,
+		})
 		return
 	}
-	var canLogDownload bool
-	if err != nil {
-		canLogDownload = false
-	} else {
-		canLogDownload = true
-	}
+	canLogDownload := err == nil && job.IsUserHasRight(ctx.User)
 	ctx.JSON(http.StatusOK, map[string]interface{}{
 		"JobName": job.JobName,
 		"Content": content,
@@ -954,6 +955,28 @@ func GrampusGetLog(ctx *context.Context) {
 	return
 }
 
+func GrampusMetrics(ctx *context.Context) {
+	jobID := ctx.Params(":jobid")
+	job, err := models.GetCloudbrainByJobID(jobID)
+	if err != nil {
+		log.Error("GetCloudbrainByJobID failed: %v", err, ctx.Data["MsgID"])
+		ctx.ServerError(err.Error(), err)
+		return
+	}
+
+	result, err := grampus.GetGrampusMetrics(job.JobID)
+	if err != nil {
+		log.Error("GetTrainJobLog failed: %v", err, ctx.Data["MsgID"])
+	}
+	ctx.JSON(http.StatusOK, map[string]interface{}{
+		"JobID":       jobID,
+		"Interval":    result.Interval,
+		"MetricsInfo": result.MetricsInfo,
+	})
+
+	return
+}
+
 func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bootFile, paramSrc, outputRemotePath, datasetName, pretrainModelPath, pretrainModelFileName, modelRemoteObsUrl string) (string, error) {
 	var command string
 
@@ -1100,3 +1123,38 @@ func downloadZipCode(ctx *context.Context, codePath, branchName string) error {
 
 	return nil
 }
+func HandleTaskWithAiCenter(ctx *context.Context) {
+	log.Info("HandleTaskWithAiCenter start")
+	updateCounts := 0
+	cloudBrains, err := models.GetC2NetWithAiCenterWrongJob()
+	if err != nil {
+		log.Error("GetC2NetWithAiCenterWrongJob failed:" + err.Error())
+		return
+	}
+	if len(cloudBrains) == 0 {
+		log.Info("HandleC2NetWithAiCenterWrongJob:no task need handle")
+		return
+	}
+	cloudBrainCounts := len(cloudBrains)
+	for _, task := range cloudBrains {
+		result, err := grampus.GetJob(task.JobID)
+		if err != nil {
+			log.Error("GetJob failed:" + err.Error())
+			continue
+		}
+		if len(result.JobInfo.Tasks) != 0 {
+			if len(result.JobInfo.Tasks[0].CenterID) == 1 && len(result.JobInfo.Tasks[0].CenterName) == 1 {
+				task.AiCenter = result.JobInfo.Tasks[0].CenterID[0] + "+" + result.JobInfo.Tasks[0].CenterName[0]
+			}
+			err = models.UpdateJob(task)
+			if err != nil {
+				log.Error("UpdateJob failed:" + err.Error())
+			}
+			updateCounts++
+		}
+	}
+	r :=
make(map[string]interface{}, 0) + r["cloudBrainCounts"] = cloudBrainCounts + r["updateCounts"] = updateCounts + ctx.JSON(http.StatusOK, response.SuccessWithData(r)) +} diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 322f746f4..2b361b507 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -645,6 +645,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Group("/specification", func() { m.Get("", admin.GetSpecificationPage) m.Get("/list", admin.GetResourceSpecificationList) + m.Get("/list/all", admin.GetAllResourceSpecificationList) m.Get("/scenes/:id", admin.GetResourceSpecificationScenes) m.Post("/grampus/sync", admin.SyncGrampusSpecs) m.Post("/add", binding.Bind(models.ResourceSpecificationReq{}), admin.AddResourceSpecification) @@ -728,6 +729,13 @@ func RegisterRoutes(m *macaron.Macaron) { m.Post("/complete_multipart", repo.CompleteMultipart) }) + m.Group("/attachments/model", func() { + m.Get("/get_chunks", repo.GetModelChunks) + m.Get("/new_multipart", repo.NewModelMultipart) + m.Get("/get_multipart_url", repo.GetModelMultipartUploadUrl) + m.Post("/complete_multipart", repo.CompleteModelMultipart) + }) + m.Group("/attachments", func() { m.Get("/public/query", repo.QueryAllPublicDataset) m.Get("/private/:username", repo.QueryPrivateDataset) @@ -1228,6 +1236,12 @@ func RegisterRoutes(m *macaron.Macaron) { }) }, context.RepoRef()) m.Group("/modelmanage", func() { + m.Get("/create_local_model_1", repo.CreateLocalModel) + m.Get("/create_local_model_2", repo.CreateLocalModelForUpload) + m.Get("/create_online_model", repo.CreateOnlineModel) + m.Post("/create_local_model", repo.SaveLocalModel) + m.Delete("/delete_model_file", repo.DeleteModelFile) + m.Post("/create_model", repo.SaveModel) m.Post("/create_model_convert", reqWechatBind, reqRepoModelManageWriter, repo.SaveModelConvert) m.Post("/create_new_model", repo.SaveNewNameModel) @@ -1487,6 +1501,12 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/record/list", point.GetPointRecordList) }, reqSignIn) + m.Group("/resources", func() { + m.Group("/queue", func() { + m.Get("/centers", admin.GetResourceAiCenters) + }) + }) + if setting.API.EnableSwagger { m.Get("/swagger.v1.json", templates.JSONRenderer(), routers.SwaggerV1Json) } diff --git a/routers/user/home.go b/routers/user/home.go index b6ab28f95..62b0357ad 100755 --- a/routers/user/home.go +++ b/routers/user/home.go @@ -23,6 +23,8 @@ import ( "code.gitea.io/gitea/modules/modelarts" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" + "code.gitea.io/gitea/routers/repo" + cloudbrainService "code.gitea.io/gitea/services/cloudbrain" issue_service "code.gitea.io/gitea/services/issue" pull_service "code.gitea.io/gitea/services/pull" @@ -837,6 +839,8 @@ func Cloudbrains(ctx *context.Context) { } models.LoadSpecs4CloudbrainInfo(ciTasks) for i, _ := range ciTasks { + ciTasks[i] = cloudbrainService.UpdateCloudbrainAiCenter(ciTasks[i]) + ciTasks[i].Cloudbrain.AiCenter = repo.GetAiCenterNameByCode(ciTasks[i].Cloudbrain.AiCenter, ctx.Language()) ciTasks[i].CanDebug = true ciTasks[i].CanDel = true ciTasks[i].Cloudbrain.ComputeResource = ciTasks[i].ComputeResource diff --git a/services/cloudbrain/cloudbrainTask/sync_status.go b/services/cloudbrain/cloudbrainTask/sync_status.go index 7153a7ec0..67dc4d3b7 100644 --- a/services/cloudbrain/cloudbrainTask/sync_status.go +++ b/services/cloudbrain/cloudbrainTask/sync_status.go @@ -14,7 +14,7 @@ import ( var noteBookOKMap = make(map[int64]int, 20) //if a task notebook url can get two times, the 
notebook can browser. -const successfulCount = 2 +const successfulCount = 3 func SyncCloudBrainOneStatus(task *models.Cloudbrain) (*models.Cloudbrain, error) { jobResult, err := cloudbrain.GetJob(task.JobID) diff --git a/services/cloudbrain/resource/resource_queue.go b/services/cloudbrain/resource/resource_queue.go index 2798a2b11..4e2dac8de 100644 --- a/services/cloudbrain/resource/resource_queue.go +++ b/services/cloudbrain/resource/resource_queue.go @@ -16,7 +16,7 @@ func AddResourceQueue(req models.ResourceQueueReq) error { } func UpdateResourceQueue(queueId int64, req models.ResourceQueueReq) error { - if _, err := models.UpdateResourceQueueById(queueId, models.ResourceQueue{ + if _, err := models.UpdateResourceCardsTotalNum(queueId, models.ResourceQueue{ CardsTotalNum: req.CardsTotalNum, Remark: req.Remark, }); err != nil { diff --git a/services/cloudbrain/resource/resource_specification.go b/services/cloudbrain/resource/resource_specification.go index 93abb2923..8f4182d87 100644 --- a/services/cloudbrain/resource/resource_specification.go +++ b/services/cloudbrain/resource/resource_specification.go @@ -130,10 +130,49 @@ func GetResourceSpecificationList(opts models.SearchResourceSpecificationOptions if err != nil { return nil, err } - return models.NewResourceSpecAndQueueListRes(n, r), nil } +//GetAllDistinctResourceSpecification returns specification and queue after distinct +//totalSize is always 0 here +func GetAllDistinctResourceSpecification(opts models.SearchResourceSpecificationOptions) (*models.ResourceSpecAndQueueListRes, error) { + opts.Page = 0 + opts.PageSize = 1000 + opts.OrderBy = models.SearchSpecOrder4Standard + _, r, err := models.SearchResourceSpecification(opts) + if err != nil { + return nil, err + } + nr := distinctResourceSpecAndQueue(r) + return models.NewResourceSpecAndQueueListRes(0, nr), nil +} + +func distinctResourceSpecAndQueue(r []models.ResourceSpecAndQueue) []models.ResourceSpecAndQueue { + specs := make([]models.ResourceSpecAndQueue, 0, len(r)) + sourceSpecIdMap := make(map[string]models.ResourceSpecAndQueue, 0) + for i := 0; i < len(r); i++ { + spec := r[i] + if spec.SourceSpecId == "" { + specs = append(specs, spec) + continue + } + if _, has := sourceSpecIdMap[spec.SourceSpecId]; has { + //prefer to use on-shelf spec + if sourceSpecIdMap[spec.SourceSpecId].Status != spec.Status && spec.Status == models.SpecOnShelf { + for k, v := range specs { + if v.ResourceSpecification.ID == sourceSpecIdMap[spec.SourceSpecId].ResourceSpecification.ID { + specs[k] = spec + } + } + } + continue + } + specs = append(specs, spec) + sourceSpecIdMap[spec.SourceSpecId] = spec + } + return specs +} + func GetResourceSpecificationScenes(specId int64) ([]models.ResourceSceneBriefRes, error) { r, err := models.GetSpecScenes(specId) if err != nil { @@ -200,6 +239,7 @@ func AddSpecOperateLog(doerId int64, operateType string, newValue, oldValue *mod } func FindAvailableSpecs(userId int64, opts models.FindSpecsOptions) ([]*models.Specification, error) { + opts.SpecStatus = models.SpecOnShelf r, err := models.FindSpecs(opts) if err != nil { log.Error("FindAvailableSpecs error.%v", err) diff --git a/services/cloudbrain/util.go b/services/cloudbrain/util.go index d9f0510be..0a3096e3f 100644 --- a/services/cloudbrain/util.go +++ b/services/cloudbrain/util.go @@ -6,6 +6,8 @@ import ( "strings" "time" + + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/setting" ) @@ -34,6 +36,7 @@ func GetAiCenterShow(aiCenter string, ctx 
*context.Context) string { return "" } + func GetDisplayJobName(username string) string { t := time.Now() return jobNamePrefixValid(cutString(username, 5)) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:] @@ -54,5 +57,42 @@ func jobNamePrefixValid(s string) string { re = regexp.MustCompile(`^[_\\-]+`) return re.ReplaceAllString(removeSpecial, "") +} + +func GetAiCenterInfoByCenterCode(aiCenterCode string) *setting.C2NetSequenceInfo { + if setting.AiCenterCodeAndNameMapInfo != nil { + if info, ok := setting.AiCenterCodeAndNameMapInfo[aiCenterCode]; ok { + return info + } else { + return nil + } + } else { + return nil + } +} + +func getAiCenterCode(aiCenter string) string { + aiCenterInfo := strings.Split(aiCenter, "+") + return aiCenterInfo[0] +} + +func UpdateCloudbrainAiCenter(cloudbrain *models.CloudbrainInfo) *models.CloudbrainInfo { + if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainOne { + cloudbrain.Cloudbrain.AiCenter = models.AICenterOfCloudBrainOne + cloudbrain.Cloudbrain.Cluster = models.OpenICluster + } + if cloudbrain.Cloudbrain.Type == models.TypeCloudBrainTwo { + cloudbrain.Cloudbrain.AiCenter = models.AICenterOfCloudBrainTwo + cloudbrain.Cloudbrain.Cluster = models.OpenICluster + } + if cloudbrain.Cloudbrain.Type == models.TypeCDCenter { + cloudbrain.Cloudbrain.AiCenter = models.AICenterOfChengdu + cloudbrain.Cloudbrain.Cluster = models.OpenICluster + } + if cloudbrain.Cloudbrain.Type == models.TypeC2Net { + cloudbrain.Cloudbrain.AiCenter = getAiCenterCode(cloudbrain.Cloudbrain.AiCenter) + cloudbrain.Cloudbrain.Cluster = models.C2NetCluster + } + return cloudbrain } diff --git a/templates/admin/cloudbrain/list.tmpl b/templates/admin/cloudbrain/list.tmpl index 92ca35c5b..94f80c0fa 100755 --- a/templates/admin/cloudbrain/list.tmpl +++ b/templates/admin/cloudbrain/list.tmpl @@ -170,7 +170,7 @@
-							{{if .AiCenter}}{{.AiCenter}}{{else}}--{{end}}
+							{{if .AiCenter}}{{.AiCenter}}{{else}}--{{end}}
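`UpdateCloudbrainAiCenter` in services/cloudbrain/util.go (above) normalizes `Cloudbrain.AiCenter` to a bare center code — C2Net tasks store it as `"<code>+<name>"` — and `GetAiCenterNameByCode` then resolves a per-language display name, falling back to the raw code. A condensed sketch of that resolution, with illustrative map contents (the real map is loaded into `setting.AiCenterCodeAndNameMapInfo` from configuration):

```go
// Sketch of the AiCenter code/name resolution; map contents are illustrative.
package aicenter

import "strings"

// C2NetSequenceInfo mirrors the fields read from setting.AiCenterCodeAndNameMapInfo.
type C2NetSequenceInfo struct {
	Content   string // display name, Chinese
	ContentEN string // display name, English
}

// Illustrative entry only; real data comes from configuration.
var codeToInfo = map[string]*C2NetSequenceInfo{
	"center01": {Content: "示例中心", ContentEN: "Example Center"},
}

// Code extracts the center code from a stored "<code>+<name>" value.
func Code(aiCenter string) string {
	return strings.Split(aiCenter, "+")[0]
}

// Name resolves a display name for the UI, falling back to the code itself.
func Name(code, lang string) string {
	info, ok := codeToInfo[code]
	if !ok {
		return code
	}
	if lang == "zh-CN" {
		return info.Content
	}
	return info.ContentEN
}
```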
@@ -184,16 +184,16 @@
         spanEl.setAttribute('title', cardType);
         spanEl.innerText = cardType;
-        var cluster = spec.Cluster || '--';
+        var cluster = {{.Cluster}} || '--';
         var clusterName = document.querySelector('.cloudbrain_debug').dataset['cluster' + cluster[0] + cluster.toLocaleLowerCase().slice(1)] || '--';
         spanEl = document.querySelector('.cluster_{{.DisplayJobName}}_{{$JobID}}');
         spanEl.setAttribute('title', cluster);
         spanEl.innerText = clusterName;
-        var aiCenter = spec.AiCenterName || '--';
-        spanEl = document.querySelector('.aicenter_{{.DisplayJobName}}_{{$JobID}}');
-        spanEl.setAttribute('title', aiCenter);
-        spanEl.innerText = aiCenter;
+        // var aiCenter = spec.AiCenterName || '--';
+        // spanEl = document.querySelector('.aicenter_{{.DisplayJobName}}_{{$JobID}}');
+        // spanEl.setAttribute('title', aiCenter);
+        // spanEl.innerText = aiCenter;
     })();
diff --git a/templates/admin/cloudbrain/search.tmpl b/templates/admin/cloudbrain/search.tmpl
index 12e8a7515..09e5f865e 100644
--- a/templates/admin/cloudbrain/search.tmpl
+++ b/templates/admin/cloudbrain/search.tmpl
@@ -71,22 +71,20 @@
     document.addEventListener('DOMContentLoaded', function() {
         $.ajax({
             type: "GET",
-            url: "/api/v1/cloudbrain/get_center_info",
+            url: "/api/v1/cloudbrainboard/cloudbrain/resource_queues",
             dataType: "json",
             data: {},
             success: function (res) {
-                var data = res || [];
+                var data = res.resourceQueues || [];
                 var aiCenterSelEl = $('#aiCenter-sel');
                 var itemEl = aiCenterSelEl.find('.menu .item').eq(0);
                 var selectAiCenterCode = aiCenterSelEl.find('.default').attr('aicenter');
                 var selectAiCenterName = '';
                 var lang = document.querySelector('html').getAttribute('lang') || 'en-US';
-                var except = ['', 'more'];
                 for (var i = 0, iLen = data.length; i < iLen; i++) {
                     var dataI = data[i];
-                    var aiCenterCode = dataI.name;
-                    if (except.indexOf(aiCenterCode) >= 0) continue;
-                    var aiCenterName = lang === 'en-US' ? dataI.content_en : dataI.content;
+                    var aiCenterCode = dataI.AiCenterCode;
+                    var aiCenterName = dataI.AiCenterName;
                     var itemClone = itemEl.clone();
                     var oHref = itemClone.attr('href');
                     var oId = itemClone.attr('id');
diff --git a/templates/admin/cloudbrain/search_dashboard.tmpl b/templates/admin/cloudbrain/search_dashboard.tmpl
index 2bf738dc9..7c4c1527d 100644
--- a/templates/admin/cloudbrain/search_dashboard.tmpl
+++ b/templates/admin/cloudbrain/search_dashboard.tmpl
@@ -85,22 +85,20 @@
     document.addEventListener('DOMContentLoaded', function() {
         $.ajax({
             type: "GET",
-            url: "/api/v1/cloudbrain/get_center_info",
+            url: "/api/v1/cloudbrainboard/cloudbrain/resource_queues",
             dataType: "json",
             data: {},
             success: function (res) {
-                var data = res || [];
+                var data = res.resourceQueues || [];
                 var aiCenterSelEl = $('#aiCenter-sel');
                 var itemEl = aiCenterSelEl.find('.menu .item').eq(0);
                 var selectAiCenterCode = aiCenterSelEl.find('.default').attr('aicenter');
                 var selectAiCenterName = '';
                 var lang = document.querySelector('html').getAttribute('lang') || 'en-US';
-                var except = ['', 'more'];
                 for (var i = 0, iLen = data.length; i < iLen; i++) {
                     var dataI = data[i];
-                    var aiCenterCode = dataI.name;
-                    if (except.indexOf(aiCenterCode) >= 0) continue;
-                    var aiCenterName = lang === 'en-US' ? dataI.content_en : dataI.content;
+                    var aiCenterCode = dataI.AiCenterCode;
+                    var aiCenterName = dataI.AiCenterName;
                     var itemClone = itemEl.clone();
                     var oHref = itemClone.attr('href');
                     var oId = itemClone.attr('id');
diff --git a/templates/base/footer_content.tmpl b/templates/base/footer_content.tmpl
index cb732bbbe..b4c8518c4 100755
--- a/templates/base/footer_content.tmpl
+++ b/templates/base/footer_content.tmpl
@@ -24,11 +24,30 @@
{{.LangName}}
-            [deleted link — markup stripped in extraction]
+            [~20 added lines of tutorial-menu markup stripped in extraction;
+             surviving text: {{.i18n.Tr "custom.Platform_Tutorial"}}]
             {{if .EnableSwagger}} API{{end}}
             {{if .IsSigned}}
diff --git a/templates/base/footer_content_fluid.tmpl b/templates/base/footer_content_fluid.tmpl
index 723c78045..be17f2781 100755
--- a/templates/base/footer_content_fluid.tmpl
+++ b/templates/base/footer_content_fluid.tmpl
@@ -22,10 +22,30 @@
{{.LangName}}
+            [~20 added lines of tutorial-menu markup stripped in extraction;
+             surviving text: {{.i18n.Tr "custom.Platform_Tutorial"}}]
             {{if .EnableSwagger}} API{{end}}
             {{if .IsSigned}}
diff --git a/templates/repo/cloudbrain/inference/show.tmpl b/templates/repo/cloudbrain/inference/show.tmpl
index 3154b8ac6..aee08d659 100644
--- a/templates/repo/cloudbrain/inference/show.tmpl
+++ b/templates/repo/cloudbrain/inference/show.tmpl
@@ -262,8 +262,6 @@
-
-
-    [removed hunk — markup stripped in extraction. The deleted block was the
-     inline script that rendered job diagnostics: it looped over podEventArray
-     appending each entry's "reason", "message" and "action" to an html
-     string, did the same for jsonObj["extras"], rewrote \r\n and \n in the
-     ExitDiagnostics text to HTML line breaks, and assigned the result to
-     document.getElementById("info_display").innerHTML]
 ;(function() {
     var SPEC = {{ .Spec }};
     var showPoint = false;
diff --git a/templates/repo/cloudbrain/trainjob/show.tmpl b/templates/repo/cloudbrain/trainjob/show.tmpl
index 7bc3f2c82..75cad03b4 100644
--- a/templates/repo/cloudbrain/trainjob/show.tmpl
+++ b/templates/repo/cloudbrain/trainjob/show.tmpl
@@ -284,15 +284,10 @@
-    [deleted rows — markup stripped in extraction]
@@ -430,9 +425,6 @@
-    [deleted rows — markup stripped in extraction; surviving text:
-     {{$.i18n.Tr "repo.modelarts.train_job.run_parameter"}}]
@@ -504,25 +496,6 @@
-    [deleted accordion rows — markup stripped in extraction]
{{$.i18n.Tr "repo.file_limit_100"}}
-    [deleted row — markup stripped in extraction]
@@ -700,11 +666,8 @@
-    [deleted rows — markup stripped in extraction]
 {{template "base/footer" .}}
@@ -712,7 +675,15 @@
-    [deleted row — markup stripped in extraction]
+{{template "base/footer" .}}
diff --git a/templates/repo/modelmanage/create_local_2.tmpl b/templates/repo/modelmanage/create_local_2.tmpl
new file mode 100644
index 000000000..5780c6194
--- /dev/null
+++ b/templates/repo/modelmanage/create_local_2.tmpl
@@ -0,0 +1,11 @@
+{{template "base/head" .}}
+
+    {{template "repo/header" .}}
+
+    [page-shell markup stripped in extraction]
+
+
+{{template "base/footer" .}}
diff --git a/templates/repo/modelmanage/create_online.tmpl b/templates/repo/modelmanage/create_online.tmpl
new file mode 100644
index 000000000..32503d1f0
--- /dev/null
+++ b/templates/repo/modelmanage/create_online.tmpl
@@ -0,0 +1,581 @@
+{{template "base/head" .}}
+
+    [~570 further new lines — markup and inline script stripped in extraction.
+     Surviving text: {{$repository := .Repository.ID}},
+     {{template "repo/header" .}}, and the page heading
+     {{.i18n.Tr "repo.model.manage.import_online_model"}}; the form body and
+     its script did not survive]
+{{template "base/footer" .}}
+
+
+
diff --git a/templates/repo/modelmanage/index.tmpl b/templates/repo/modelmanage/index.tmpl
index 6a42d96f7..b358384e3 100644
--- a/templates/repo/modelmanage/index.tmpl
+++ b/templates/repo/modelmanage/index.tmpl
@@ -25,6 +25,23 @@
         border-bottom-left-radius: 4px;
         box-shadow: 0 2px 3px 0 rgb(34 36 38 / 15%);
     }
+    .m-blue-btn {
+        background-color: rgb(22, 132, 252) !important;
+    }
+    .m-blue-btn:hover {
+        background-color: #66b1ff !important;
+        color: #fff;
+    }
+
+    .m-blue-btn:focus {
+        background-color: #66b1ff !important;
+        color: #fff;
+    }
+
+    .m-blue-btn:active {
+        background-color: #3a8ee6 !important;
+        color: #fff;
+    }
@@ -57,8 +74,10 @@
+            [tag-stripped link: {{$.i18n.Tr "repo.model.manage.import_local_model"}}]
             [tag-stripped link: {{$.i18n.Tr "repo.model.manage.import_new_model"}}]
+            href="{{.RepoLink}}/modelmanage/create_online_model">{{$.i18n.Tr "repo.model.manage.import_online_model"}}
         {{if eq $.MODEL_COUNT 0}}
@@ -66,6 +85,7 @@
             {{$.i18n.Tr "repo.model.manage.notcreatemodel"}}
+            [tag-stripped row]
{{$.i18n.Tr "repo.model.manage.createmodel_tip"}} {{$.i18n.Tr "repo.model.manage.createtrainjob"}}
{{$.i18n.Tr "repo.platform_instructions1"}} {{$.i18n.Tr "repo.platform_instructions2"}} {{$.i18n.Tr "repo.platform_instructions3"}}
@@ -421,7 +443,8 @@
             let train_html = '';
             modelData = data;
             for (let i = 0; i < n_length; i++) {
-                train_html += `${data[i].VersionName}`
+                var VersionName = data[i].VersionName || 'V0001';
+                train_html += `${VersionName}`
                 train_html += ''
             }
             if (data.length) {
@@ -568,5 +591,4 @@
             $("#choice_Engine").removeClass('disabled');
         }
     }
-
\ No newline at end of file
diff --git a/templates/repo/modelmanage/showinfo.tmpl b/templates/repo/modelmanage/showinfo.tmpl
index 0a29375f1..1b153bb45 100644
--- a/templates/repo/modelmanage/showinfo.tmpl
+++ b/templates/repo/modelmanage/showinfo.tmpl
@@ -1,533 +1,10 @@
 {{template "base/head" .}}
-
+
+
 {{template "repo/header" .}}
-    [removed: ~520 deleted lines of the static model-info page — markup
-     stripped in extraction. Recoverable rows: the base-info table
-     ({{$.i18n.Tr "repo.model.manage.model_name"}}, "repo.model.manage.version",
-     "repo.migrate_items_labels", "repo.modelarts.model_size",
-     "repo.modelarts.createtime", "repo.model.manage.description"), the
-     train-job table ("repo.modelarts.code_version", start_file, train_dataset,
-     run_parameter, AI_Engine, standard, compute_node) and the accuracy table
-     (Accuracy, F1, Precision, Recall)]
+    [the remaining new lines are a bare page shell — markup stripped in
+     extraction]
+
+{{template "base/footer" .}}
-
\ No newline at end of file
diff --git a/templates/repo/modelsafety/show.tmpl b/templates/repo/modelsafety/show.tmpl
index dfd1bd5b9..a46077443 100644
--- a/templates/repo/modelsafety/show.tmpl
+++ b/templates/repo/modelsafety/show.tmpl
@@ -861,7 +861,8 @@
             $('td.ti-text-form-content.spec div').text(specStr);
             SPEC && $('td.ti-text-form-content.resorce_type div').text(getListValueWithKey(ACC_CARD_TYPE, SPEC.AccCardType));
         }
-        var oLogHref = $('#-log-down').attr('href');
+        var repoPath = {{$.RepoRelPath}};
+        var oLogHref = `/api/v1/repos/${repoPath}/cloudbrain`;
         $('#-log-down').attr('href', oLogHref + `/${res.ID}/download_log_file`);
         $('.full-log-dialog').attr('data-href', oLogHref + `/${res.ID}/download_log_file`);
         if (res.ResultJson) {
diff --git a/templates/user/dashboard/cloudbrains.tmpl b/templates/user/dashboard/cloudbrains.tmpl
index fe7689944..9af17fb54 100755
--- a/templates/user/dashboard/cloudbrains.tmpl
+++ b/templates/user/dashboard/cloudbrains.tmpl
@@ -65,7 +65,7 @@
-        {{range .Tasks}}
+        {{range .Tasks}}
         {{if .Repo}}
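The search.tmpl and search_dashboard.tmpl hunks earlier in this diff repoint the center filter at `/api/v1/cloudbrainboard/cloudbrain/resource_queues` and read `resourceQueues[i].AiCenterCode` / `.AiCenterName`. A minimal Go model of that payload — the field names come from the template JS, while the struct names and sample values are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ResourceQueue mirrors one entry of the resourceQueues array the dropdown consumes.
type ResourceQueue struct {
	AiCenterCode string `json:"AiCenterCode"`
	AiCenterName string `json:"AiCenterName"`
}

type resourceQueuesResponse struct {
	ResourceQueues []ResourceQueue `json:"resourceQueues"`
}

func main() {
	// Sample payload — values assumed for illustration.
	raw := []byte(`{"resourceQueues":[{"AiCenterCode":"pclcci","AiCenterName":"Pengcheng Cloud Brain"}]}`)
	var res resourceQueuesResponse
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}
	for _, q := range res.ResourceQueues {
		fmt.Println(q.AiCenterCode, q.AiCenterName)
	}
}
```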
@@ -155,7 +155,7 @@
-            {{if .AiCenter}}{{.AiCenter}}{{else}}--{{end}}
+            {{if .AiCenter}}{{.AiCenter}}{{else}}--{{end}}
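For the `{{.AiCenter}}` cell above to hold a bare center code, `UpdateCloudbrainAiCenter` (module hunk at the top of this diff) rewrites C2Net rows before render. A self-contained sketch of that branch — the struct and constants below are illustrative stand-ins, not the `models` definitions:

```go
package main

import (
	"fmt"
	"strings"
)

// Cloudbrain stands in for models.Cloudbrain; field set reduced for the example.
type Cloudbrain struct {
	Type     int
	AiCenter string
	Cluster  string
}

const (
	typeC2Net    = 2       // assumed enum value
	c2NetCluster = "C2Net" // assumed label
)

// normalizeAiCenter mirrors the C2Net branch of UpdateCloudbrainAiCenter:
// keep only the code before "+" and pin the cluster label.
func normalizeAiCenter(c *Cloudbrain) {
	if c.Type == typeC2Net {
		c.AiCenter = strings.Split(c.AiCenter, "+")[0]
		c.Cluster = c2NetCluster
	}
}

func main() {
	job := Cloudbrain{Type: typeC2Net, AiCenter: "pclcci+Pengcheng Cloud Computing"} // hypothetical value
	normalizeAiCenter(&job)
	fmt.Println(job.AiCenter, job.Cluster) // pclcci C2Net
}
```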
@@ -169,16 +169,16 @@
         spanEl.setAttribute('title', cardType);
         spanEl.innerText = cardType;
-        var cluster = spec.Cluster || '--';
+        var cluster = {{.Cluster}} || '--';
         var clusterName = document.querySelector('.cloudbrain_debug').dataset['cluster' + cluster[0] + cluster.toLocaleLowerCase().slice(1)] || '--';
         spanEl = document.querySelector('.cluster_{{.DisplayJobName}}_{{$JobID}}');
         spanEl.setAttribute('title', cluster);
         spanEl.innerText = clusterName;
-        var aiCenter = spec.AiCenterName || '--';
-        spanEl = document.querySelector('.aicenter_{{.DisplayJobName}}_{{$JobID}}');
-        spanEl.setAttribute('title', aiCenter);
-        spanEl.innerText = aiCenter;
+        // var aiCenter = spec.AiCenterName || '--';
+        // spanEl = document.querySelector('.aicenter_{{.DisplayJobName}}_{{$JobID}}');
+        // spanEl.setAttribute('title', aiCenter);
+        // spanEl.innerText = aiCenter;
     })();
diff --git a/web_src/js/components/Model.vue b/web_src/js/components/Model.vue
index 7362246c4..02b8643ae 100644
--- a/web_src/js/components/Model.vue
+++ b/web_src/js/components/Model.vue
@@ -16,16 +16,17 @@
         prop="name"
         :label="i18n.model_name"
         align="left"
-        min-width="17%"
+        min-width="20%"
       >
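Both dashboards now emit `var cluster = {{.Cluster}} || '--';` straight into a script block. That is safe with Go's html/template, which JSON-encodes values in JS context, so an empty field still yields valid JS and the `||` fallback fires in the browser — demonstrated below with a reduced template, not the actual tmpl:

```go
package main

import (
	"html/template"
	"os"
)

func main() {
	// html/template detects the <script> context and emits .Cluster as a
	// quoted, escaped JS string — "" when the field is empty.
	t := template.Must(template.New("demo").Parse(
		`<script>var cluster = {{.Cluster}} || '--';</script>`))
	if err := t.Execute(os.Stdout, map[string]string{"Cluster": ""}); err != nil {
		panic(err)
	}
	// Output: <script>var cluster = "" || '--';</script>
}
```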