| @@ -60,6 +60,15 @@ const ( | |||||
| ActionCreateGPUTrainTask //31 | ActionCreateGPUTrainTask //31 | ||||
| ActionCreateGrampusNPUTrainTask //32 | ActionCreateGrampusNPUTrainTask //32 | ||||
| ActionCreateGrampusGPUTrainTask //33 | ActionCreateGrampusGPUTrainTask //33 | ||||
| ActionBindWechat //34 | |||||
| ActionCreateCloudbrainTask //35 | |||||
| ActionDatasetRecommended //36 | |||||
| ActionCreateImage //37 | |||||
| ActionImageRecommend //38 | |||||
| ActionChangeUserAvatar //39 | |||||
| ActionPushCommits //40 | |||||
| ActionForkRepo //41 | |||||
| ) | ) | ||||
| // Action represents user operation type and other information to | // Action represents user operation type and other information to | ||||
| @@ -81,6 +90,18 @@ type Action struct { | |||||
| IsTransformed bool `xorm:"INDEX NOT NULL DEFAULT false"` | IsTransformed bool `xorm:"INDEX NOT NULL DEFAULT false"` | ||||
| Content string `xorm:"TEXT"` | Content string `xorm:"TEXT"` | ||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | ||||
| Cloudbrain *Cloudbrain `xorm:"-"` | |||||
| } | |||||
| type ActionShow struct { | |||||
| OpType ActionType | |||||
| RepoLink string | |||||
| ShortRepoFullDisplayName string | |||||
| Content string | |||||
| RefName string | |||||
| IssueInfos []string | |||||
| CommentLink string | |||||
| Cloudbrain *CloudbrainShow4Action | |||||
| } | } | ||||
| // GetOpType gets the ActionType of this action. | // GetOpType gets the ActionType of this action. | ||||
| @@ -218,6 +239,47 @@ func (a *Action) GetRepoLink() string { | |||||
| return "/" + a.GetRepoPath() | return "/" + a.GetRepoPath() | ||||
| } | } | ||||
| func (a *Action) ToShow() *ActionShow { | |||||
| actionShow := &ActionShow{} | |||||
| actionShow.OpType = GetTaskOptType(*a) | |||||
| actionShow.Content = a.Content | |||||
| actionShow.RefName = a.RefName | |||||
| if strings.Contains(a.Content, "|") && a.IsIssueAction() { | |||||
| actionShow.IssueInfos = a.GetIssueInfos() | |||||
| } | |||||
| if a.Repo != nil { | |||||
| actionShow.RepoLink = a.GetRepoLink() | |||||
| actionShow.ShortRepoFullDisplayName = a.ShortRepoFullDisplayName() | |||||
| } | |||||
| if a.Comment != nil { | |||||
| actionShow.CommentLink = a.GetCommentLink() | |||||
| } | |||||
| if a.Cloudbrain != nil { | |||||
| c := &CloudbrainShow4Action{ | |||||
| ID: a.Cloudbrain.ID, | |||||
| JobID: a.Cloudbrain.JobID, | |||||
| Type: a.Cloudbrain.Type, | |||||
| JobType: a.Cloudbrain.JobType, | |||||
| DisplayJobName: a.Cloudbrain.DisplayJobName, | |||||
| ComputeResource: a.Cloudbrain.ComputeResource, | |||||
| } | |||||
| actionShow.Cloudbrain = c | |||||
| } | |||||
| return actionShow | |||||
| } | |||||
| func GetTaskOptType(action Action) ActionType { | |||||
//Convert every type of cloudbrain task action into ActionCreateCloudbrainTask | |||||
| if action.IsCloudbrainAction() { | |||||
| return ActionCreateCloudbrainTask | |||||
| } | |||||
| return action.OpType | |||||
| } | |||||
| // GetRepositoryFromMatch returns a *Repository from a username and repo strings | // GetRepositoryFromMatch returns a *Repository from a username and repo strings | ||||
| func GetRepositoryFromMatch(ownerName string, repoName string) (*Repository, error) { | func GetRepositoryFromMatch(ownerName string, repoName string) (*Repository, error) { | ||||
| var err error | var err error | ||||
| @@ -315,6 +377,39 @@ func (a *Action) GetIssueContent() string { | |||||
| return issue.Content | return issue.Content | ||||
| } | } | ||||
| func (a *Action) IsCloudbrainAction() bool { | |||||
| switch a.OpType { | |||||
| case ActionCreateDebugGPUTask, | |||||
| ActionCreateDebugNPUTask, | |||||
| ActionCreateTrainTask, | |||||
| ActionCreateInferenceTask, | |||||
| ActionCreateBenchMarkTask, | |||||
| ActionCreateGPUTrainTask, | |||||
| ActionCreateGrampusNPUTrainTask, | |||||
| ActionCreateGrampusGPUTrainTask: | |||||
| return true | |||||
| } | |||||
| return false | |||||
| } | |||||
| func (a *Action) IsIssueAction() bool { | |||||
| switch a.OpType { | |||||
| case ActionCreateIssue, | |||||
| ActionCloseIssue, | |||||
| ActionClosePullRequest, | |||||
| ActionReopenIssue, | |||||
| ActionReopenPullRequest, | |||||
| ActionCommentPull, | |||||
| ActionCommentIssue, | |||||
| ActionCreatePullRequest, | |||||
| ActionApprovePullRequest, | |||||
| ActionRejectPullRequest, | |||||
| ActionMergePullRequest: | |||||
| return true | |||||
| } | |||||
| return false | |||||
| } | |||||
| // GetFeedsOptions options for retrieving feeds | // GetFeedsOptions options for retrieving feeds | ||||
| type GetFeedsOptions struct { | type GetFeedsOptions struct { | ||||
| RequestedUser *User // the user we want activity for | RequestedUser *User // the user we want activity for | ||||
| @@ -404,3 +499,18 @@ func GetUnTransformedActions() ([]*Action, error) { | |||||
| Find(&actions) | Find(&actions) | ||||
| return actions, err | return actions, err | ||||
| } | } | ||||
| func GetActionByIds(ids []int64) ([]*Action, error) { | |||||
| if len(ids) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| actions := make([]*Action, 0) | |||||
| err := x.In("id", ids).Find(&actions) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if err := ActionList(actions).LoadAllAttributes(); err != nil { | |||||
| return nil, fmt.Errorf("ActionList loadAttributes: %v", err) | |||||
| } | |||||
| return actions, nil | |||||
| } | |||||
| @@ -4,7 +4,11 @@ | |||||
| package models | package models | ||||
| import "fmt" | |||||
| import ( | |||||
| "fmt" | |||||
| "strconv" | |||||
| "xorm.io/builder" | |||||
| ) | |||||
| // ActionList defines a list of actions | // ActionList defines a list of actions | ||||
| type ActionList []*Action | type ActionList []*Action | ||||
| @@ -26,6 +30,9 @@ func (actions ActionList) loadUsers(e Engine) ([]*User, error) { | |||||
| userIDs := actions.getUserIDs() | userIDs := actions.getUserIDs() | ||||
| userMaps := make(map[int64]*User, len(userIDs)) | userMaps := make(map[int64]*User, len(userIDs)) | ||||
| if len(userIDs) == 0 { | |||||
| return make([]*User, 0), nil | |||||
| } | |||||
| err := e. | err := e. | ||||
| In("id", userIDs). | In("id", userIDs). | ||||
| Find(&userMaps) | Find(&userMaps) | ||||
| @@ -61,6 +68,9 @@ func (actions ActionList) loadRepositories(e Engine) ([]*Repository, error) { | |||||
| repoIDs := actions.getRepoIDs() | repoIDs := actions.getRepoIDs() | ||||
| repoMaps := make(map[int64]*Repository, len(repoIDs)) | repoMaps := make(map[int64]*Repository, len(repoIDs)) | ||||
| if len(repoIDs) == 0 { | |||||
| return make([]*Repository, 0), nil | |||||
| } | |||||
| err := e. | err := e. | ||||
| In("id", repoIDs). | In("id", repoIDs). | ||||
| Find(&repoMaps) | Find(&repoMaps) | ||||
| @@ -79,6 +89,133 @@ func (actions ActionList) LoadRepositories() ([]*Repository, error) { | |||||
| return actions.loadRepositories(x) | return actions.loadRepositories(x) | ||||
| } | } | ||||
| func (actions ActionList) getCommentIDs() []int64 { | |||||
| commentIDs := make(map[int64]struct{}, len(actions)) | |||||
| for _, action := range actions { | |||||
| if action.CommentID == 0 { | |||||
| continue | |||||
| } | |||||
| if _, ok := commentIDs[action.CommentID]; !ok { | |||||
| commentIDs[action.CommentID] = struct{}{} | |||||
| } | |||||
| } | |||||
| return keysInt64(commentIDs) | |||||
| } | |||||
| func (actions ActionList) loadComments(e Engine) ([]*Comment, error) { | |||||
| if len(actions) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| commentIDs := actions.getCommentIDs() | |||||
| commentMaps := make(map[int64]*Comment, len(commentIDs)) | |||||
| if len(commentIDs) == 0 { | |||||
| return make([]*Comment, 0), nil | |||||
| } | |||||
| err := e. | |||||
| In("id", commentIDs). | |||||
| Find(&commentMaps) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("find comment: %v", err) | |||||
| } | |||||
| for _, action := range actions { | |||||
| if action.CommentID > 0 { | |||||
| action.Comment = commentMaps[action.CommentID] | |||||
| } | |||||
| } | |||||
| return valuesComment(commentMaps), nil | |||||
| } | |||||
// LoadComments loads all comments of the actions | |||||
| func (actions ActionList) LoadComments() ([]*Comment, error) { | |||||
| return actions.loadComments(x) | |||||
| } | |||||
| func (actions ActionList) getCloudbrainIDs() []int64 { | |||||
| cloudbrainIDs := make(map[int64]struct{}, 0) | |||||
| for _, action := range actions { | |||||
| if !action.IsCloudbrainAction() { | |||||
| continue | |||||
| } | |||||
//Content stores the cloudbrain ID for some task types and the jobId for others; skip values that are not valid numeric IDs | |||||
cloudbrainId, err := strconv.ParseInt(action.Content, 10, 64) | |||||
if err != nil || cloudbrainId == 0 { | |||||
continue | |||||
} | |||||
if _, ok := cloudbrainIDs[cloudbrainId]; !ok { | |||||
cloudbrainIDs[cloudbrainId] = struct{}{} | |||||
} | |||||
| } | |||||
| return keysInt64(cloudbrainIDs) | |||||
| } | |||||
| func (actions ActionList) getCloudbrainJobIDs() []string { | |||||
| cloudbrainJobIDs := make(map[string]struct{}, 0) | |||||
| for _, action := range actions { | |||||
| if !action.IsCloudbrainAction() { | |||||
| continue | |||||
| } | |||||
| if _, ok := cloudbrainJobIDs[action.Content]; !ok { | |||||
| cloudbrainJobIDs[action.Content] = struct{}{} | |||||
| } | |||||
| } | |||||
| return keysString(cloudbrainJobIDs) | |||||
| } | |||||
| func (actions ActionList) loadCloudbrains(e Engine) ([]*Cloudbrain, error) { | |||||
| if len(actions) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| cloudbrainIDs := actions.getCloudbrainIDs() | |||||
| cloudbrainJobIDs := actions.getCloudbrainJobIDs() | |||||
| cloudbrainMaps := make(map[int64]*Cloudbrain, len(cloudbrainIDs)) | |||||
| if len(cloudbrainIDs) == 0 { | |||||
| return make([]*Cloudbrain, 0), nil | |||||
| } | |||||
//Different types of cloudbrain tasks store different identifiers in the content field when their action is published: some store the ID, others store the jobId, | |||||
//so both fields are used as query conditions when looking up the cloudbrain record for an action. | |||||
| cond := builder.Or(builder.In("id", cloudbrainIDs)).Or(builder.In("job_id", cloudbrainJobIDs)) | |||||
| err := e. | |||||
| Where(cond).Unscoped(). | |||||
| Find(&cloudbrainMaps) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("find cloudbrain: %v", err) | |||||
| } | |||||
| cloudBrainJobIdMap := make(map[string]*Cloudbrain, len(cloudbrainIDs)) | |||||
| for _, v := range cloudbrainMaps { | |||||
| cloudBrainJobIdMap[v.JobID] = v | |||||
| } | |||||
| for _, action := range actions { | |||||
| if !action.IsCloudbrainAction() { | |||||
| continue | |||||
| } | |||||
| cloudbrainId, _ := strconv.ParseInt(action.Content, 10, 64) | |||||
| if cloudbrainId > 0 { | |||||
| if c, ok := cloudbrainMaps[cloudbrainId]; ok { | |||||
| if c.DisplayJobName == action.RefName || c.JobName == action.RefName { | |||||
| action.Cloudbrain = c | |||||
| continue | |||||
| } | |||||
| } | |||||
| } | |||||
| if c, ok := cloudBrainJobIdMap[action.Content]; ok { | |||||
| if c.DisplayJobName == action.RefName || c.JobName == action.RefName { | |||||
| action.Cloudbrain = c | |||||
| continue | |||||
| } | |||||
| } | |||||
| } | |||||
| return valuesCloudbrain(cloudbrainMaps), nil | |||||
| } | |||||
// LoadCloudbrains loads all cloudbrains of the actions | |||||
func (actions ActionList) LoadCloudbrains() ([]*Cloudbrain, error) { | |||||
return actions.loadCloudbrains(x) | |||||
| } | |||||
| // loadAttributes loads all attributes | // loadAttributes loads all attributes | ||||
| func (actions ActionList) loadAttributes(e Engine) (err error) { | func (actions ActionList) loadAttributes(e Engine) (err error) { | ||||
| if _, err = actions.loadUsers(e); err != nil { | if _, err = actions.loadUsers(e); err != nil { | ||||
| @@ -96,3 +233,30 @@ func (actions ActionList) loadAttributes(e Engine) (err error) { | |||||
| func (actions ActionList) LoadAttributes() error { | func (actions ActionList) LoadAttributes() error { | ||||
| return actions.loadAttributes(x) | return actions.loadAttributes(x) | ||||
| } | } | ||||
| // LoadAllAttributes loads all attributes of the actions | |||||
// Compared with LoadAttributes(), LoadAllAttributes() also loads the Comment and Cloudbrain attributes | |||||
| func (actions ActionList) LoadAllAttributes() error { | |||||
| return actions.loadAllAttributes(x) | |||||
| } | |||||
// loadAllAttributes loads users, repositories, comments and cloudbrains of the actions | |||||
| func (actions ActionList) loadAllAttributes(e Engine) (err error) { | |||||
| if _, err = actions.loadUsers(e); err != nil { | |||||
| return | |||||
| } | |||||
| if _, err = actions.loadRepositories(e); err != nil { | |||||
| return | |||||
| } | |||||
| if _, err = actions.loadComments(e); err != nil { | |||||
| return | |||||
| } | |||||
| if _, err = actions.loadCloudbrains(e); err != nil { | |||||
| return | |||||
| } | |||||
| return nil | |||||
| } | |||||
| @@ -701,3 +701,11 @@ func Attachments(opts *AttachmentsOptions) ([]*AttachmentInfo, int64, error) { | |||||
| return attachments, count, nil | return attachments, count, nil | ||||
| } | } | ||||
| func GetAllDatasetContributorByDatasetId(datasetId int64) ([]*User, error) { | |||||
| r := make([]*User, 0) | |||||
| if err := x.Select("distinct(public.user.*)").Table("attachment").Join("LEFT", "user", "public.user.ID = attachment.uploader_id").Where("attachment.dataset_id = ?", datasetId).Find(&r); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| @@ -25,7 +25,8 @@ type ModelArtsJobStatus string | |||||
| const ( | const ( | ||||
| TypeCloudBrainOne int = iota | TypeCloudBrainOne int = iota | ||||
| TypeCloudBrainTwo | TypeCloudBrainTwo | ||||
TypeC2Net //C2Net intelligent computing network | |||||
TypeC2Net //C2Net intelligent computing network | |||||
TypeCDCenter //Chengdu intelligent computing center | |||||
| TypeCloudBrainAll = -1 | TypeCloudBrainAll = -1 | ||||
| ) | ) | ||||
| @@ -120,6 +121,11 @@ const ( | |||||
| //AI center | //AI center | ||||
| AICenterOfCloudBrainOne = "OpenIOne" | AICenterOfCloudBrainOne = "OpenIOne" | ||||
| AICenterOfCloudBrainTwo = "OpenITwo" | AICenterOfCloudBrainTwo = "OpenITwo" | ||||
| AICenterOfChengdu = "OpenIChengdu" | |||||
| //ComputeResource | |||||
| GPU = "GPU" | |||||
| NPU = "NPU" | |||||
| ) | ) | ||||
| type Cloudbrain struct { | type Cloudbrain struct { | ||||
| @@ -190,6 +196,46 @@ type Cloudbrain struct { | |||||
| BenchmarkTypeRankLink string `xorm:"-"` | BenchmarkTypeRankLink string `xorm:"-"` | ||||
| StartTime timeutil.TimeStamp | StartTime timeutil.TimeStamp | ||||
| EndTime timeutil.TimeStamp | EndTime timeutil.TimeStamp | ||||
| Spec *Specification `xorm:"-"` | |||||
| } | |||||
| type CloudbrainShow struct { | |||||
| ID int64 | |||||
| JobID string | |||||
| RepoFullName string | |||||
| Type int | |||||
| JobType string | |||||
| DisplayJobName string | |||||
| Duration string | |||||
| ResourceSpec *Specification | |||||
| ComputeResource string | |||||
| AiCenter string | |||||
| } | |||||
| type CloudbrainShow4Action struct { | |||||
| ID int64 | |||||
| JobID string | |||||
| Type int | |||||
| JobType string | |||||
| DisplayJobName string | |||||
| ComputeResource string | |||||
| } | |||||
| func (task *Cloudbrain) ToShow() *CloudbrainShow { | |||||
| c := &CloudbrainShow{ | |||||
| ID: task.ID, | |||||
| JobID: task.JobID, | |||||
| JobType: task.JobType, | |||||
| Type: task.Type, | |||||
| DisplayJobName: task.DisplayJobName, | |||||
| Duration: task.TrainJobDuration, | |||||
| ResourceSpec: task.Spec, | |||||
| ComputeResource: task.ComputeResource, | |||||
| } | |||||
| if task.Repo != nil { | |||||
| c.RepoFullName = task.Repo.FullName() | |||||
| } | |||||
| return c | |||||
| } | } | ||||
| func (task *Cloudbrain) ComputeAndSetDuration() { | func (task *Cloudbrain) ComputeAndSetDuration() { | ||||
| @@ -589,11 +635,12 @@ type ResourceSpecs struct { | |||||
| } | } | ||||
| type ResourceSpec struct { | type ResourceSpec struct { | ||||
| Id int `json:"id"` | |||||
| CpuNum int `json:"cpu"` | |||||
| GpuNum int `json:"gpu"` | |||||
| MemMiB int `json:"memMiB"` | |||||
| ShareMemMiB int `json:"shareMemMiB"` | |||||
| Id int `json:"id"` | |||||
| CpuNum int `json:"cpu"` | |||||
| GpuNum int `json:"gpu"` | |||||
| MemMiB int `json:"memMiB"` | |||||
| ShareMemMiB int `json:"shareMemMiB"` | |||||
| UnitPrice int64 `json:"unitPrice"` | |||||
| } | } | ||||
| type FlavorInfos struct { | type FlavorInfos struct { | ||||
| @@ -601,32 +648,23 @@ type FlavorInfos struct { | |||||
| } | } | ||||
| type FlavorInfo struct { | type FlavorInfo struct { | ||||
| Id int `json:"id"` | |||||
| Value string `json:"value"` | |||||
| Desc string `json:"desc"` | |||||
| Id int `json:"id"` | |||||
| Value string `json:"value"` | |||||
| Desc string `json:"desc"` | |||||
| UnitPrice int64 `json:"unitPrice"` | |||||
| } | } | ||||
| type SpecialPools struct { | type SpecialPools struct { | ||||
| Pools []*SpecialPool `json:"pools"` | Pools []*SpecialPool `json:"pools"` | ||||
| } | } | ||||
| type SpecialPool struct { | type SpecialPool struct { | ||||
| Org string `json:"org"` | |||||
| Type string `json:"type"` | |||||
| IsExclusive bool `json:"isExclusive"` | |||||
| Pool []*GpuInfo `json:"pool"` | |||||
| JobType []string `json:"jobType"` | |||||
| ResourceSpec []*ResourceSpec `json:"resourceSpecs"` | |||||
| Flavor []*FlavorInfo `json:"flavor"` | |||||
| } | |||||
| type ImageInfosModelArts struct { | |||||
| ImageInfo []*ImageInfoModelArts `json:"image_info"` | |||||
| } | |||||
| type ImageInfoModelArts struct { | |||||
| Id string `json:"id"` | |||||
| Value string `json:"value"` | |||||
| Desc string `json:"desc"` | |||||
| Org string `json:"org"` | |||||
| Type string `json:"type"` | |||||
| IsExclusive bool `json:"isExclusive"` | |||||
| Pool []*GpuInfo `json:"pool"` | |||||
| JobType []string `json:"jobType"` | |||||
| ResourceSpec []*ResourceSpec `json:"resourceSpecs"` | |||||
| Flavor []*setting.FlavorInfo `json:"flavor"` | |||||
| } | } | ||||
| type PoolInfos struct { | type PoolInfos struct { | ||||
| @@ -732,6 +770,17 @@ type CreateNotebook2Params struct { | |||||
| Volume VolumeReq `json:"volume"` | Volume VolumeReq `json:"volume"` | ||||
| } | } | ||||
| type CreateNotebookWithoutPoolParams struct { | |||||
| JobName string `json:"name"` | |||||
| Description string `json:"description"` | |||||
| Duration int64 `json:"duration"` //ms | |||||
| Feature string `json:"feature"` | |||||
| Flavor string `json:"flavor"` | |||||
| ImageID string `json:"image_id"` | |||||
| WorkspaceID string `json:"workspace_id"` | |||||
| Volume VolumeReq `json:"volume"` | |||||
| } | |||||
| type VolumeReq struct { | type VolumeReq struct { | ||||
| Capacity int `json:"capacity"` | Capacity int `json:"capacity"` | ||||
| Category string `json:"category"` | Category string `json:"category"` | ||||
| @@ -955,6 +1004,7 @@ type NotebookGetJobTokenResult struct { | |||||
| } | } | ||||
| type NotebookDelResult struct { | type NotebookDelResult struct { | ||||
| NotebookResult | |||||
| InstanceID string `json:"instance_id"` | InstanceID string `json:"instance_id"` | ||||
| } | } | ||||
| @@ -1481,12 +1531,6 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { | |||||
| ) | ) | ||||
| } | } | ||||
| if len(opts.ComputeResource) > 0 { | |||||
| cond = cond.And( | |||||
| builder.Eq{"cloudbrain.compute_resource": opts.ComputeResource}, | |||||
| ) | |||||
| } | |||||
| if len(opts.JobTypes) > 0 { | if len(opts.JobTypes) > 0 { | ||||
| if opts.JobTypeNot { | if opts.JobTypeNot { | ||||
| cond = cond.And( | cond = cond.And( | ||||
| @@ -1506,7 +1550,7 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { | |||||
| if (opts.Cluster) != "" { | if (opts.Cluster) != "" { | ||||
| if opts.Cluster == "resource_cluster_openi" { | if opts.Cluster == "resource_cluster_openi" { | ||||
| cond = cond.And( | cond = cond.And( | ||||
| builder.Or(builder.Eq{"cloudbrain.type": TypeCloudBrainOne}, builder.Eq{"cloudbrain.type": TypeCloudBrainTwo}), | |||||
| builder.Or(builder.Eq{"cloudbrain.type": TypeCloudBrainOne}, builder.Eq{"cloudbrain.type": TypeCloudBrainTwo}, builder.Eq{"cloudbrain.type": TypeCDCenter}), | |||||
| ) | ) | ||||
| } | } | ||||
| if opts.Cluster == "resource_cluster_c2net" { | if opts.Cluster == "resource_cluster_c2net" { | ||||
| @@ -1720,11 +1764,24 @@ func CloudbrainsVersionList(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int, e | |||||
| } | } | ||||
| func CreateCloudbrain(cloudbrain *Cloudbrain) (err error) { | func CreateCloudbrain(cloudbrain *Cloudbrain) (err error) { | ||||
session := x.NewSession() | |||||
defer session.Close() | |||||
if err = session.Begin(); err != nil { | |||||
return err | |||||
} | |||||
| cloudbrain.TrainJobDuration = DURATION_STR_ZERO | cloudbrain.TrainJobDuration = DURATION_STR_ZERO | ||||
| if _, err = x.NoAutoTime().Insert(cloudbrain); err != nil { | |||||
| if _, err = session.NoAutoTime().InsertOne(cloudbrain); err != nil { | |||||
| session.Rollback() | |||||
| return err | return err | ||||
| } | } | ||||
| if cloudbrain.Spec != nil { | |||||
| if _, err = session.Insert(NewCloudBrainSpec(cloudbrain.ID, *cloudbrain.Spec)); err != nil { | |||||
| session.Rollback() | |||||
| return err | |||||
| } | |||||
| } | |||||
if err = session.Commit(); err != nil { | |||||
return err | |||||
} | |||||
| go IncreaseDatasetUseCount(cloudbrain.Uuid) | go IncreaseDatasetUseCount(cloudbrain.Uuid) | ||||
| return nil | return nil | ||||
| } | } | ||||
| @@ -1959,7 +2016,7 @@ func GetWaitingCloudbrainCount(cloudbrainType int, computeResource string, jobTy | |||||
| func GetCloudbrainNotebookCountByUserID(userID int64) (int, error) { | func GetCloudbrainNotebookCountByUserID(userID int64) (int, error) { | ||||
| count, err := x.In("status", ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStarting, ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsRestarting). | count, err := x.In("status", ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStarting, ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsRestarting). | ||||
| And("job_type = ? and user_id = ? and type = ?", JobTypeDebug, userID, TypeCloudBrainTwo).Count(new(Cloudbrain)) | |||||
| And("job_type = ? and user_id = ? and type in (?,?)", JobTypeDebug, userID, TypeCloudBrainTwo, TypeCDCenter).Count(new(Cloudbrain)) | |||||
| return int(count), err | return int(count), err | ||||
| } | } | ||||
| @@ -2003,11 +2060,18 @@ func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) { | |||||
| return err | return err | ||||
| } | } | ||||
| if _, err = sess.NoAutoTime().Insert(new); err != nil { | |||||
| if _, err = sess.NoAutoTime().InsertOne(new); err != nil { | |||||
| sess.Rollback() | sess.Rollback() | ||||
| return err | return err | ||||
| } | } | ||||
| if new.Spec != nil { | |||||
| if _, err = sess.Insert(NewCloudBrainSpec(new.ID, *new.Spec)); err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| } | |||||
| if err = sess.Commit(); err != nil { | if err = sess.Commit(); err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| @@ -2207,6 +2271,27 @@ func CloudbrainAllStatic(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, er | |||||
| return cloudbrains, count, nil | return cloudbrains, count, nil | ||||
| } | } | ||||
| func GetStartedCloudbrainTaskByUpdatedUnix(startTime, endTime time.Time) ([]Cloudbrain, error) { | |||||
| r := make([]Cloudbrain, 0) | |||||
| err := x.Where("updated_unix >= ? and updated_unix <= ? and start_time > 0", startTime.Unix(), endTime.Unix()).Unscoped().Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| func GetCloudbrainByIds(ids []int64) ([]*Cloudbrain, error) { | |||||
| if len(ids) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| cloudbrains := make([]*Cloudbrain, 0) | |||||
| err := x.In("id", ids).Unscoped().Find(&cloudbrains) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return cloudbrains, nil | |||||
| } | |||||
| type DatasetInfo struct { | type DatasetInfo struct { | ||||
| DataLocalPath string | DataLocalPath string | ||||
| Name string | Name string | ||||
| @@ -2399,7 +2484,57 @@ func GetCloudbrainByIDs(ids []int64) ([]*Cloudbrain, error) { | |||||
| Find(&cloudbrains) | Find(&cloudbrains) | ||||
| } | } | ||||
| func GetCloudbrainWithDeletedByIDs(ids []int64) ([]*Cloudbrain, error) { | |||||
| cloudbrains := make([]*Cloudbrain, 0) | |||||
| return cloudbrains, x. | |||||
| In("id", ids).Unscoped().Find(&cloudbrains) | |||||
| } | |||||
| func GetCloudbrainCountByJobName(jobName, jobType string, typeCloudbrain int) (int, error) { | func GetCloudbrainCountByJobName(jobName, jobType string, typeCloudbrain int) (int, error) { | ||||
| count, err := x.Where("job_name = ? and job_type= ? and type = ?", jobName, jobType, typeCloudbrain).Count(new(Cloudbrain)) | count, err := x.Where("job_name = ? and job_type= ? and type = ?", jobName, jobType, typeCloudbrain).Count(new(Cloudbrain)) | ||||
| return int(count), err | return int(count), err | ||||
| } | } | ||||
| func LoadSpecs(tasks []*Cloudbrain) error { | |||||
| cloudbrainIds := make([]int64, len(tasks)) | |||||
| for i, v := range tasks { | |||||
| cloudbrainIds[i] = v.ID | |||||
| } | |||||
| specs := make([]*CloudbrainSpec, 0) | |||||
| err := x.In("cloudbrain_id", cloudbrainIds).Find(&specs) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| specMap := make(map[int64]*CloudbrainSpec) | |||||
| for _, v := range specs { | |||||
specMap[v.CloudbrainID] = v | |||||
| } | |||||
| for _, v := range tasks { | |||||
| if specMap[v.ID] != nil { | |||||
| v.Spec = specMap[v.ID].ConvertToSpecification() | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func LoadSpecs4CloudbrainInfo(tasks []*CloudbrainInfo) error { | |||||
| cloudbrainIds := make([]int64, len(tasks)) | |||||
| for i, v := range tasks { | |||||
| cloudbrainIds[i] = v.Cloudbrain.ID | |||||
| } | |||||
| specs := make([]*CloudbrainSpec, 0) | |||||
| err := x.In("cloudbrain_id", cloudbrainIds).Find(&specs) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| specMap := make(map[int64]*CloudbrainSpec) | |||||
| for _, v := range specs { | |||||
| specMap[v.CloudbrainID] = v | |||||
| } | |||||
| for _, v := range tasks { | |||||
| if specMap[v.Cloudbrain.ID] != nil { | |||||
| v.Cloudbrain.Spec = specMap[v.Cloudbrain.ID].ConvertToSpecification() | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,132 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| ) | |||||
| type CloudbrainSpec struct { | |||||
| CloudbrainID int64 `xorm:"pk"` | |||||
| SpecId int64 `xorm:"index"` | |||||
| SourceSpecId string | |||||
| AccCardsNum int | |||||
| AccCardType string | |||||
| CpuCores int | |||||
| MemGiB float32 | |||||
| GPUMemGiB float32 | |||||
| ShareMemGiB float32 | |||||
| ComputeResource string | |||||
| UnitPrice int | |||||
| QueueId int64 | |||||
| QueueCode string | |||||
| Cluster string | |||||
| AiCenterCode string | |||||
| AiCenterName string | |||||
| IsExclusive bool | |||||
| ExclusiveOrg string | |||||
| CreatedTime timeutil.TimeStamp `xorm:"created"` | |||||
| UpdatedTime timeutil.TimeStamp `xorm:"updated"` | |||||
| } | |||||
| func (s CloudbrainSpec) ConvertToSpecification() *Specification { | |||||
| return &Specification{ | |||||
| ID: s.SpecId, | |||||
| SourceSpecId: s.SourceSpecId, | |||||
| AccCardsNum: s.AccCardsNum, | |||||
| AccCardType: s.AccCardType, | |||||
| CpuCores: s.CpuCores, | |||||
| MemGiB: s.MemGiB, | |||||
| GPUMemGiB: s.GPUMemGiB, | |||||
| ShareMemGiB: s.ShareMemGiB, | |||||
| ComputeResource: s.ComputeResource, | |||||
| UnitPrice: s.UnitPrice, | |||||
| QueueId: s.QueueId, | |||||
| QueueCode: s.QueueCode, | |||||
| Cluster: s.Cluster, | |||||
| AiCenterCode: s.AiCenterCode, | |||||
| AiCenterName: s.AiCenterName, | |||||
| IsExclusive: s.IsExclusive, | |||||
| ExclusiveOrg: s.ExclusiveOrg, | |||||
| } | |||||
| } | |||||
| func NewCloudBrainSpec(cloudbrainId int64, s Specification) CloudbrainSpec { | |||||
| return CloudbrainSpec{ | |||||
| CloudbrainID: cloudbrainId, | |||||
| SpecId: s.ID, | |||||
| SourceSpecId: s.SourceSpecId, | |||||
| AccCardsNum: s.AccCardsNum, | |||||
| AccCardType: s.AccCardType, | |||||
| CpuCores: s.CpuCores, | |||||
| MemGiB: s.MemGiB, | |||||
| GPUMemGiB: s.GPUMemGiB, | |||||
| ShareMemGiB: s.ShareMemGiB, | |||||
| ComputeResource: s.ComputeResource, | |||||
| UnitPrice: s.UnitPrice, | |||||
| QueueId: s.QueueId, | |||||
| QueueCode: s.QueueCode, | |||||
| Cluster: s.Cluster, | |||||
| AiCenterCode: s.AiCenterCode, | |||||
| AiCenterName: s.AiCenterName, | |||||
| IsExclusive: s.IsExclusive, | |||||
| ExclusiveOrg: s.ExclusiveOrg, | |||||
| } | |||||
| } | |||||
| var StatusChangeChan = make(chan *Cloudbrain, 50) | |||||
| func InsertCloudbrainSpec(c CloudbrainSpec) (int64, error) { | |||||
| return x.Insert(&c) | |||||
| } | |||||
| func GetCloudbrainSpecByID(cloudbrainId int64) (*CloudbrainSpec, error) { | |||||
| r := &CloudbrainSpec{} | |||||
| if has, err := x.Where("cloudbrain_id = ?", cloudbrainId).Get(r); err != nil { | |||||
| return nil, err | |||||
| } else if !has { | |||||
| return nil, nil | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| func FindCloudbrainTask(page, pageSize int) ([]*Cloudbrain, error) { | |||||
| r := make([]*Cloudbrain, 0) | |||||
| err := x.Unscoped(). | |||||
| Limit(pageSize, (page-1)*pageSize). | |||||
| OrderBy("cloudbrain.id"). | |||||
| Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| func CountNoSpecHistoricTask() (int64, error) { | |||||
| n, err := x.Unscoped(). | |||||
| Where(" 1=1 and not exists (select 1 from cloudbrain_spec where cloudbrain.id = cloudbrain_spec.cloudbrain_id)"). | |||||
| Count(&Cloudbrain{}) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return n, nil | |||||
| } | |||||
| // GetResourceSpecMapByCloudbrainIDs | |||||
| func GetResourceSpecMapByCloudbrainIDs(ids []int64) (map[int64]*Specification, error) { | |||||
| specs := make([]*CloudbrainSpec, 0) | |||||
| if err := x.In("cloudbrain_id", ids).Find(&specs); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| r := make(map[int64]*Specification, len(ids)) | |||||
| for _, s := range specs { | |||||
| r[s.CloudbrainID] = s.ConvertToSpecification() | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
func GetCloudbrainTaskUnitPrice(cloudbrainId int64) (int, error) { | |||||
s, err := GetCloudbrainSpecByID(cloudbrainId) | |||||
if err != nil { | |||||
return 0, err | |||||
} | |||||
if s == nil { | |||||
return 0, ErrRecordNotExist{} | |||||
} | |||||
return s.UnitPrice, nil | |||||
} | |||||
| @@ -131,13 +131,17 @@ func (datasets DatasetList) loadAttachmentAttributes(opts *SearchDatasetOptions) | |||||
| permission = false | permission = false | ||||
| datasets[i].Repo.GetOwner() | datasets[i].Repo.GetOwner() | ||||
| if !permission { | if !permission { | ||||
| isCollaborator, _ := datasets[i].Repo.IsCollaborator(opts.User.ID) | |||||
| isInRepoTeam,_:=datasets[i].Repo.IsInRepoTeam(opts.User.ID) | |||||
| if isCollaborator ||isInRepoTeam { | |||||
| log.Info("Collaborator user may visit the attach.") | |||||
if datasets[i].Repo.OwnerID == opts.User.ID { | |||||
| permission = true | permission = true | ||||
} else { | |||||
| isCollaborator, _ := datasets[i].Repo.IsCollaborator(opts.User.ID) | |||||
isInRepoTeam, _ := datasets[i].Repo.IsInRepoTeam(opts.User.ID) | |||||
if isCollaborator || isInRepoTeam { | |||||
| permission = true | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| permissionMap[datasets[i].ID] = permission | permissionMap[datasets[i].ID] = permission | ||||
| @@ -2012,3 +2012,27 @@ func IsErrTagNotExist(err error) bool { | |||||
| _, ok := err.(ErrTagNotExist) | _, ok := err.(ErrTagNotExist) | ||||
| return ok | return ok | ||||
| } | } | ||||
| type ErrRecordNotExist struct { | |||||
| } | |||||
| func IsErrRecordNotExist(err error) bool { | |||||
| _, ok := err.(ErrRecordNotExist) | |||||
| return ok | |||||
| } | |||||
| func (err ErrRecordNotExist) Error() string { | |||||
| return fmt.Sprintf("record not exist in database") | |||||
| } | |||||
| type ErrInsufficientPointsBalance struct { | |||||
| } | |||||
| func IsErrInsufficientPointsBalance(err error) bool { | |||||
| _, ok := err.(ErrInsufficientPointsBalance) | |||||
| return ok | |||||
| } | |||||
| func (err ErrInsufficientPointsBalance) Error() string { | |||||
| return fmt.Sprintf("Insufficient points balance") | |||||
| } | |||||
| @@ -11,6 +11,13 @@ func keysInt64(m map[int64]struct{}) []int64 { | |||||
| } | } | ||||
| return keys | return keys | ||||
| } | } | ||||
| func keysString(m map[string]struct{}) []string { | |||||
| var keys = make([]string, 0, len(m)) | |||||
| for k := range m { | |||||
| keys = append(keys, k) | |||||
| } | |||||
| return keys | |||||
| } | |||||
| func valuesRepository(m map[int64]*Repository) []*Repository { | func valuesRepository(m map[int64]*Repository) []*Repository { | ||||
| var values = make([]*Repository, 0, len(m)) | var values = make([]*Repository, 0, len(m)) | ||||
| @@ -27,3 +34,18 @@ func valuesUser(m map[int64]*User) []*User { | |||||
| } | } | ||||
| return values | return values | ||||
| } | } | ||||
| func valuesComment(m map[int64]*Comment) []*Comment { | |||||
| var values = make([]*Comment, 0, len(m)) | |||||
| for _, v := range m { | |||||
| values = append(values, v) | |||||
| } | |||||
| return values | |||||
| } | |||||
| func valuesCloudbrain(m map[int64]*Cloudbrain) []*Cloudbrain { | |||||
| var values = make([]*Cloudbrain, 0, len(m)) | |||||
| for _, v := range m { | |||||
| values = append(values, v) | |||||
| } | |||||
| return values | |||||
| } | |||||
| @@ -0,0 +1,184 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "xorm.io/builder" | |||||
| ) | |||||
| type LimitType string | |||||
| const ( | |||||
| LimitTypeTask LimitType = "TASK" | |||||
| LimitTypeRewardPoint LimitType = "REWARD_POINT" | |||||
| ) | |||||
| func (l LimitType) Name() string { | |||||
| switch l { | |||||
| case LimitTypeTask: | |||||
| return "TASK" | |||||
| case LimitTypeRewardPoint: | |||||
| return "REWARD_POINT" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| type LimitScope string | |||||
| const ( | |||||
| LimitScopeAllUsers LimitScope = "ALL_USERS" | |||||
| LimitScopeSingleUser LimitScope = "SINGLE_USER" | |||||
| ) | |||||
| func (l LimitScope) Name() string { | |||||
| switch l { | |||||
| case LimitScopeAllUsers: | |||||
| return "ALL_USERS" | |||||
| case LimitScopeSingleUser: | |||||
| return "SINGLE_USER" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| type LimiterRejectPolicy string | |||||
| const ( | |||||
| JustReject LimiterRejectPolicy = "JUST_REJECT" | |||||
| PermittedOnce LimiterRejectPolicy = "PERMITTED_ONCE" | |||||
| FillUp LimiterRejectPolicy = "FillUp" | |||||
| ) | |||||
| type LimitConfig struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| Title string | |||||
| RefreshRate string `xorm:"NOT NULL"` | |||||
| Scope string `xorm:"NOT NULL"` | |||||
| LimitNum int64 `xorm:"NOT NULL"` | |||||
| LimitCode string | |||||
| LimitType string `xorm:"NOT NULL"` | |||||
| RelatedId int64 `xorm:"INDEX"` | |||||
| CreatorId int64 `xorm:"NOT NULL"` | |||||
| CreatorName string | |||||
| DeleterId int64 | |||||
| DeleterName string | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"created"` | |||||
| DeletedAt timeutil.TimeStamp `xorm:"deleted"` | |||||
| } | |||||
| type LimitConfigQueryOpts struct { | |||||
| RefreshRate string | |||||
| Scope LimitScope | |||||
| LimitCode string | |||||
| LimitType LimitType | |||||
| } | |||||
| type LimitConfigVO struct { | |||||
| ID int64 | |||||
| Title string | |||||
| RefreshRate string | |||||
| Scope string | |||||
| LimitNum int64 | |||||
| LimitCode string | |||||
| LimitType string | |||||
| Creator string | |||||
| CreatedUnix timeutil.TimeStamp | |||||
| } | |||||
| func (l *LimitConfig) ToLimitConfigVO() *LimitConfigVO { | |||||
| return &LimitConfigVO{ | |||||
| ID: l.ID, | |||||
| Title: l.Title, | |||||
| RefreshRate: l.RefreshRate, | |||||
| Scope: l.Scope, | |||||
| LimitNum: l.LimitNum, | |||||
| LimitCode: l.LimitCode, | |||||
| LimitType: l.LimitType, | |||||
| Creator: l.CreatorName, | |||||
| CreatedUnix: l.CreatedUnix, | |||||
| } | |||||
| } | |||||
| func GetLimitConfigByLimitType(limitType LimitType) ([]LimitConfig, error) { | |||||
| r := make([]LimitConfig, 0) | |||||
| err := x.Where(" limit_type = ?", limitType.Name()).Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if len(r) == 0 { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| func GetLimitersByRelatedIdWithDeleted(limitType LimitType) ([]LimitConfig, error) { | |||||
| r := make([]LimitConfig, 0) | |||||
| err := x.Unscoped().Where(" = ?", limitType.Name()).Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if len(r) == 0 { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| func AddLimitConfig(l *LimitConfig) error { | |||||
sess := x.NewSession() | |||||
defer sess.Close() | |||||
if err := sess.Begin(); err != nil { | |||||
return err | |||||
} | |||||
| //delete old limit config | |||||
| cond := builder.NewCond() | |||||
| cond = cond.And(builder.Eq{"limit_type": l.LimitType}) | |||||
| cond = cond.And(builder.Eq{"scope": l.Scope}) | |||||
| if l.LimitCode == "" { | |||||
| subCond := builder.NewCond() | |||||
| subCond = subCond.Or(builder.IsNull{"limit_code"}) | |||||
| subCond = subCond.Or(builder.Eq{"limit_code": ""}) | |||||
| cond = cond.And(subCond) | |||||
| } else { | |||||
| cond = cond.And(builder.Eq{"limit_code": l.LimitCode}) | |||||
| } | |||||
| _, err := sess.Where(cond).Delete(&LimitConfig{}) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| //add new config | |||||
| _, err = sess.Insert(l) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
return sess.Commit() | |||||
| } | |||||
| func DeleteLimitConfig(config LimitConfig, deleterId int64, deleterName string) error { | |||||
sess := x.NewSession() | |||||
defer sess.Close() | |||||
if err := sess.Begin(); err != nil { | |||||
return err | |||||
} | |||||
//record who deleted the config before the soft delete | |||||
_, err := sess.ID(config.ID).Update(&LimitConfig{DeleterName: deleterName, DeleterId: deleterId}) | |||||
if err != nil { | |||||
sess.Rollback() | |||||
return err | |||||
} | |||||
_, err = sess.ID(config.ID).Delete(&LimitConfig{}) | |||||
if err != nil { | |||||
sess.Rollback() | |||||
return err | |||||
} | |||||
return sess.Commit() | |||||
| } | |||||
| func GetLimitConfigById(id int64) (*LimitConfig, error) { | |||||
| r := &LimitConfig{} | |||||
| isOk, err := x.ID(id).Get(r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if !isOk { | |||||
| return nil, nil | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| @@ -144,12 +144,21 @@ func init() { | |||||
| new(WechatBindLog), | new(WechatBindLog), | ||||
| new(OrgStatistic), | new(OrgStatistic), | ||||
| new(SearchRecord), | new(SearchRecord), | ||||
| new(TaskConfig), | |||||
| new(TaskAccomplishLog), | |||||
| new(RewardOperateRecord), | |||||
| new(LimitConfig), | |||||
| new(RewardPeriodicTask), | |||||
| new(PointAccountLog), | |||||
| new(PointAccount), | |||||
| new(RewardAdminLog), | |||||
| new(AiModelConvert), | new(AiModelConvert), | ||||
| new(ResourceQueue), | new(ResourceQueue), | ||||
| new(ResourceSpecification), | new(ResourceSpecification), | ||||
| new(ResourceScene), | new(ResourceScene), | ||||
| new(ResourceSceneSpec), | new(ResourceSceneSpec), | ||||
| new(AdminOperateLog), | new(AdminOperateLog), | ||||
| new(CloudbrainSpec), | |||||
| new(CloudbrainTemp), | new(CloudbrainTemp), | ||||
| new(DatasetReference), | new(DatasetReference), | ||||
| ) | ) | ||||
| @@ -0,0 +1,142 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| ) | |||||
| type PointAccountStatus int | |||||
| // Possible PointAccountStatus types. | |||||
| const ( | |||||
| PointAccountNormal int = iota + 1 // 1 | |||||
| PointAccountFreeze // 2 | |||||
| PointAccountDeleted // 3 | |||||
| ) | |||||
| type PointAccount struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| AccountCode string `xorm:"INDEX NOT NULL"` | |||||
| Balance int64 `xorm:"NOT NULL DEFAULT 0"` | |||||
| TotalEarned int64 `xorm:"NOT NULL DEFAULT 0"` | |||||
| TotalConsumed int64 `xorm:"NOT NULL DEFAULT 0"` | |||||
| UserId int64 `xorm:"INDEX NOT NULL"` | |||||
| Status int `xorm:"NOT NULL"` | |||||
| Version int64 `xorm:"NOT NULL"` | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | |||||
| UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` | |||||
| } | |||||
| func (account *PointAccount) Increase(amount int64, sourceId string) error { | |||||
sess := x.NewSession() | |||||
defer sess.Close() | |||||
if err := sess.Begin(); err != nil { | |||||
return err | |||||
} | |||||
| sql := "update point_account set balance = balance + ?,total_earned = total_earned + ? ,version = version + 1 where account_code = ? " | |||||
| _, err := sess.Exec(sql, amount, amount, account.AccountCode) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| accountLog := &PointAccountLog{ | |||||
| AccountCode: account.AccountCode, | |||||
| UserId: account.UserId, | |||||
| Type: IncreaseAccountBalance, | |||||
| SourceId: sourceId, | |||||
| PointsAmount: amount, | |||||
| BalanceBefore: account.Balance, | |||||
| BalanceAfter: account.Balance + amount, | |||||
| AccountVersion: account.Version, | |||||
| } | |||||
| _, err = sess.Insert(accountLog) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
return sess.Commit() | |||||
| } | |||||
| func (account *PointAccount) Decrease(amount int64, sourceId string) error { | |||||
sess := x.NewSession() | |||||
defer sess.Close() | |||||
if err := sess.Begin(); err != nil { | |||||
return err | |||||
} | |||||
| sql := "update point_account set balance = balance - ?,total_consumed = total_consumed + ? ,version = version + 1 where account_code = ? " | |||||
| _, err := sess.Exec(sql, amount, amount, account.AccountCode) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| accountLog := &PointAccountLog{ | |||||
| AccountCode: account.AccountCode, | |||||
| UserId: account.UserId, | |||||
| Type: DecreaseAccountBalance, | |||||
| SourceId: sourceId, | |||||
| PointsAmount: amount, | |||||
| BalanceBefore: account.Balance, | |||||
| BalanceAfter: account.Balance - amount, | |||||
| AccountVersion: account.Version, | |||||
| } | |||||
| _, err = sess.Insert(accountLog) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
return sess.Commit() | |||||
| } | |||||
| func GetAccountByUserId(userId int64) (*PointAccount, error) { | |||||
| p := &PointAccount{} | |||||
| has, err := x.Where("user_id = ?", userId).Get(p) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if !has { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return p, nil | |||||
| } | |||||
| func InsertAccount(tl *PointAccount) (int64, error) { | |||||
| return x.Insert(tl) | |||||
| } | |||||
| type SearchPointAccountOpts struct { | |||||
| ListOptions | |||||
| Keyword string | |||||
| } | |||||
| type SearchPointAccountResponse struct { | |||||
| Records []*UserPointAccount | |||||
| PageSize int | |||||
| Page int | |||||
| Total int64 | |||||
| } | |||||
| type UserPointAccount struct { | |||||
| UserId int64 | |||||
| UserName string | |||||
| Email string | |||||
| Balance int64 | |||||
| TotalEarned int64 | |||||
| TotalConsumed int64 | |||||
| } | |||||
| func (UserPointAccount) TableName() string { | |||||
| return "user" | |||||
| } | |||||
| func GetPointAccountMapByUserIds(userIds []int64) (map[int64]*PointAccount, error) { | |||||
| if len(userIds) == 0 { | |||||
| return make(map[int64]*PointAccount, 0), nil | |||||
| } | |||||
| accounts := make([]*PointAccount, 0) | |||||
| err := x.In("user_id", userIds).Find(&accounts) | |||||
| if err != nil { | |||||
| log.Error("GetPointAccountMapByUserIds error.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| accountMap := make(map[int64]*PointAccount, 0) | |||||
| for _, v := range accounts { | |||||
| accountMap[v.UserId] = v | |||||
| } | |||||
| return accountMap, nil | |||||
| } | |||||
| @@ -0,0 +1,21 @@ | |||||
| package models | |||||
| import "code.gitea.io/gitea/modules/timeutil" | |||||
| const ( | |||||
| IncreaseAccountBalance = "increase" | |||||
| DecreaseAccountBalance = "decrease" | |||||
| ) | |||||
| type PointAccountLog struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| AccountCode string `xorm:"INDEX NOT NULL"` | |||||
| UserId int64 `xorm:"INDEX NOT NULL"` | |||||
| Type string `xorm:"NOT NULL"` | |||||
| SourceId string `xorm:"INDEX NOT NULL"` | |||||
| PointsAmount int64 `xorm:"NOT NULL"` | |||||
| BalanceBefore int64 `xorm:"NOT NULL"` | |||||
| BalanceAfter int64 `xorm:"NOT NULL"` | |||||
| AccountVersion int64 `xorm:"NOT NULL"` | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | |||||
| } | |||||
| @@ -237,6 +237,12 @@ type Repository struct { | |||||
| LowerAlias string `xorm:"INDEX"` | LowerAlias string `xorm:"INDEX"` | ||||
| } | } | ||||
| type RepositoryShow struct { | |||||
| Name string | |||||
| RepoType RepoType | |||||
| Alias string | |||||
| } | |||||
| // SanitizedOriginalURL returns a sanitized OriginalURL | // SanitizedOriginalURL returns a sanitized OriginalURL | ||||
| func (repo *Repository) SanitizedOriginalURL() string { | func (repo *Repository) SanitizedOriginalURL() string { | ||||
| if repo.OriginalURL == "" { | if repo.OriginalURL == "" { | ||||
| @@ -25,6 +25,7 @@ const ( | |||||
| ) | ) | ||||
| var ActionChan = make(chan *Action, 200) | var ActionChan = make(chan *Action, 200) | ||||
| var ActionChan4Task = make(chan Action, 200) | |||||
| // Watch is connection request for receiving repository notification. | // Watch is connection request for receiving repository notification. | ||||
| type Watch struct { | type Watch struct { | ||||
| @@ -199,6 +200,14 @@ func notifyWatchers(e Engine, actions ...*Action) error { | |||||
| if _, err = e.InsertOne(act); err != nil { | if _, err = e.InsertOne(act); err != nil { | ||||
| return fmt.Errorf("insert new actioner: %v", err) | return fmt.Errorf("insert new actioner: %v", err) | ||||
| } | } | ||||
// After InsertOne(act), the act has an ID | |||||
// Send the act to the task channel | |||||
| ActionChan4Task <- *act | |||||
// If the action is not related to a repository, return directly | |||||
| if act.Repo == nil && act.RepoID == 0 { | |||||
| return nil | |||||
| } | |||||
| if repoChanged { | if repoChanged { | ||||
| act.loadRepo() | act.loadRepo() | ||||
| @@ -279,7 +288,6 @@ func notifyWatchers(e Engine, actions ...*Action) error { | |||||
| // NotifyWatchers creates batch of actions for every watcher. | // NotifyWatchers creates batch of actions for every watcher. | ||||
| func NotifyWatchers(actions ...*Action) error { | func NotifyWatchers(actions ...*Action) error { | ||||
| error := notifyWatchers(x, actions...) | error := notifyWatchers(x, actions...) | ||||
| producer(actions...) | producer(actions...) | ||||
| return error | return error | ||||
| @@ -287,7 +295,7 @@ func NotifyWatchers(actions ...*Action) error { | |||||
| func producer(actions ...*Action) { | func producer(actions ...*Action) { | ||||
| for _, action := range actions { | for _, action := range actions { | ||||
| if !action.IsPrivate{ | |||||
| if !action.IsPrivate { | |||||
| ActionChan <- action | ActionChan <- action | ||||
| } | } | ||||
| } | } | ||||
| @@ -71,6 +71,8 @@ func (r ResourceQueueReq) ToDTO() ResourceQueue { | |||||
| q.AiCenterName = "云脑一" | q.AiCenterName = "云脑一" | ||||
| } else if r.AiCenterCode == AICenterOfCloudBrainTwo { | } else if r.AiCenterCode == AICenterOfCloudBrainTwo { | ||||
| q.AiCenterName = "云脑二" | q.AiCenterName = "云脑二" | ||||
| } else if r.AiCenterCode == AICenterOfChengdu { | |||||
| q.AiCenterName = "启智成都智算" | |||||
| } | } | ||||
| } | } | ||||
| return q | return q | ||||
| @@ -2,6 +2,7 @@ package models | |||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/timeutil" | "code.gitea.io/gitea/modules/timeutil" | ||||
| "fmt" | |||||
| "xorm.io/builder" | "xorm.io/builder" | ||||
| ) | ) | ||||
| @@ -22,6 +23,7 @@ type ResourceSpecification struct { | |||||
| ShareMemGiB float32 | ShareMemGiB float32 | ||||
| UnitPrice int | UnitPrice int | ||||
| Status int | Status int | ||||
| IsAvailable bool | |||||
| IsAutomaticSync bool | IsAutomaticSync bool | ||||
| CreatedTime timeutil.TimeStamp `xorm:"created"` | CreatedTime timeutil.TimeStamp `xorm:"created"` | ||||
| CreatedBy int64 | CreatedBy int64 | ||||
| @@ -40,6 +42,7 @@ func (r ResourceSpecification) ConvertToRes() *ResourceSpecificationRes { | |||||
| GPUMemGiB: r.GPUMemGiB, | GPUMemGiB: r.GPUMemGiB, | ||||
| UnitPrice: r.UnitPrice, | UnitPrice: r.UnitPrice, | ||||
| Status: r.Status, | Status: r.Status, | ||||
| IsAvailable: r.IsAvailable, | |||||
| UpdatedTime: r.UpdatedTime, | UpdatedTime: r.UpdatedTime, | ||||
| } | } | ||||
| } | } | ||||
| @@ -72,14 +75,16 @@ func (r ResourceSpecificationReq) ToDTO() ResourceSpecification { | |||||
| IsAutomaticSync: r.IsAutomaticSync, | IsAutomaticSync: r.IsAutomaticSync, | ||||
| CreatedBy: r.CreatorId, | CreatedBy: r.CreatorId, | ||||
| UpdatedBy: r.CreatorId, | UpdatedBy: r.CreatorId, | ||||
| IsAvailable: true, | |||||
| } | } | ||||
| } | } | ||||
| type SearchResourceSpecificationOptions struct { | type SearchResourceSpecificationOptions struct { | ||||
| ListOptions | ListOptions | ||||
| QueueId int64 | |||||
| Status int | |||||
| Cluster string | |||||
| QueueId int64 | |||||
| Status int | |||||
| Cluster string | |||||
| AvailableCode int | |||||
| } | } | ||||
| type SearchResourceBriefSpecificationOptions struct { | type SearchResourceBriefSpecificationOptions struct { | ||||
| @@ -113,6 +118,7 @@ type ResourceSpecificationRes struct { | |||||
| ShareMemGiB float32 | ShareMemGiB float32 | ||||
| UnitPrice int | UnitPrice int | ||||
| Status int | Status int | ||||
| IsAvailable bool | |||||
| UpdatedTime timeutil.TimeStamp | UpdatedTime timeutil.TimeStamp | ||||
| } | } | ||||
| @@ -141,6 +147,53 @@ func (r ResourceSpecAndQueue) ConvertToRes() *ResourceSpecAndQueueRes { | |||||
| } | } | ||||
| } | } | ||||
| type FindSpecsOptions struct { | |||||
| JobType JobType | |||||
| ComputeResource string | |||||
| Cluster string | |||||
| AiCenterCode string | |||||
| SpecId int64 | |||||
| QueueCode string | |||||
| SourceSpecId string | |||||
| AccCardsNum int | |||||
| UseAccCardsNum bool | |||||
| AccCardType string | |||||
| CpuCores int | |||||
| UseCpuCores bool | |||||
| MemGiB float32 | |||||
| UseMemGiB bool | |||||
| GPUMemGiB float32 | |||||
| UseGPUMemGiB bool | |||||
| ShareMemGiB float32 | |||||
| UseShareMemGiB bool | |||||
//If true, find specs regardless of whether they are used in a scene; if false, only find specs that are used in a scene | |||||
| RequestAll bool | |||||
| } | |||||
| type Specification struct { | |||||
| ID int64 | |||||
| SourceSpecId string | |||||
| AccCardsNum int | |||||
| AccCardType string | |||||
| CpuCores int | |||||
| MemGiB float32 | |||||
| GPUMemGiB float32 | |||||
| ShareMemGiB float32 | |||||
| ComputeResource string | |||||
| UnitPrice int | |||||
| QueueId int64 | |||||
| QueueCode string | |||||
| Cluster string | |||||
| AiCenterCode string | |||||
| AiCenterName string | |||||
| IsExclusive bool | |||||
| ExclusiveOrg string | |||||
| } | |||||
| func (Specification) TableName() string { | |||||
| return "resource_specification" | |||||
| } | |||||
| func InsertResourceSpecification(r ResourceSpecification) (int64, error) { | func InsertResourceSpecification(r ResourceSpecification) (int64, error) { | ||||
| return x.Insert(&r) | return x.Insert(&r) | ||||
| } | } | ||||
| @@ -167,6 +220,11 @@ func SearchResourceSpecification(opts SearchResourceSpecificationOptions) (int64 | |||||
| if opts.Cluster != "" { | if opts.Cluster != "" { | ||||
| cond = cond.And(builder.Eq{"resource_queue.cluster": opts.Cluster}) | cond = cond.And(builder.Eq{"resource_queue.cluster": opts.Cluster}) | ||||
| } | } | ||||
| if opts.AvailableCode == 1 { | |||||
| cond = cond.And(builder.Eq{"resource_specification.is_available": true}) | |||||
| } else if opts.AvailableCode == 2 { | |||||
| cond = cond.And(builder.Eq{"resource_specification.is_available": false}) | |||||
| } | |||||
| //cond = cond.And(builder.Or(builder.Eq{"resource_queue.deleted_time": 0}).Or(builder.IsNull{"resource_queue.deleted_time"})) | //cond = cond.And(builder.Or(builder.Eq{"resource_queue.deleted_time": 0}).Or(builder.IsNull{"resource_queue.deleted_time"})) | ||||
| n, err := x.Where(cond).Join("INNER", "resource_queue", "resource_queue.ID = resource_specification.queue_id"). | n, err := x.Where(cond).Join("INNER", "resource_queue", "resource_queue.ID = resource_specification.queue_id"). | ||||
| Unscoped().Count(&ResourceSpecAndQueue{}) | Unscoped().Count(&ResourceSpecAndQueue{}) | ||||
| @@ -256,7 +314,7 @@ func SyncGrampusSpecs(updateList []ResourceSpecification, insertList []ResourceS | |||||
| return err | return err | ||||
| } | } | ||||
| if len(deleteIds) > 0 { | if len(deleteIds) > 0 { | ||||
| if _, err = sess.In("id", deleteIds).Update(&ResourceSpecification{Status: SpecOffShelf}); err != nil { | |||||
| if _, err = sess.Cols("status", "is_available").In("id", deleteIds).Update(&ResourceSpecification{Status: SpecOffShelf, IsAvailable: false}); err != nil { | |||||
| return err | return err | ||||
| } | } | ||||
| if _, err = sess.In("spec_id", deleteIds).Delete(&ResourceSceneSpec{}); err != nil { | if _, err = sess.In("spec_id", deleteIds).Delete(&ResourceSceneSpec{}); err != nil { | ||||
| @@ -267,7 +325,7 @@ func SyncGrampusSpecs(updateList []ResourceSpecification, insertList []ResourceS | |||||
| //update exists specs | //update exists specs | ||||
| if len(updateList) > 0 { | if len(updateList) > 0 { | ||||
| for _, v := range updateList { | for _, v := range updateList { | ||||
| if _, err = sess.ID(v.ID).Update(&v); err != nil { | |||||
| if _, err = sess.ID(v.ID).UseBool("is_available").Update(&v); err != nil { | |||||
| return err | return err | ||||
| } | } | ||||
| } | } | ||||
| @@ -283,3 +341,221 @@ func SyncGrampusSpecs(updateList []ResourceSpecification, insertList []ResourceS | |||||
| return sess.Commit() | return sess.Commit() | ||||
| } | } | ||||
//FindSpecs finds resource specifications, joined with their queues, according to the given options | |||||
| func FindSpecs(opts FindSpecsOptions) ([]*Specification, error) { | |||||
| var cond = builder.NewCond() | |||||
| if !opts.RequestAll && opts.JobType != "" { | |||||
| cond = cond.And(builder.Eq{"resource_scene.job_type": opts.JobType}) | |||||
| } | |||||
| if opts.ComputeResource != "" { | |||||
| cond = cond.And(builder.Eq{"resource_queue.compute_resource": opts.ComputeResource}) | |||||
| } | |||||
| if opts.Cluster != "" { | |||||
| cond = cond.And(builder.Eq{"resource_queue.cluster": opts.Cluster}) | |||||
| } | |||||
| if opts.AiCenterCode != "" { | |||||
| cond = cond.And(builder.Eq{"resource_queue.ai_center_code": opts.AiCenterCode}) | |||||
| } | |||||
| if opts.SpecId > 0 { | |||||
| cond = cond.And(builder.Eq{"resource_specification.id": opts.SpecId}) | |||||
| } | |||||
| if opts.QueueCode != "" { | |||||
| cond = cond.And(builder.Eq{"resource_queue.queue_code": opts.QueueCode}) | |||||
| } | |||||
| if opts.SourceSpecId != "" { | |||||
| cond = cond.And(builder.Eq{"resource_specification.source_spec_id": opts.SourceSpecId}) | |||||
| } | |||||
| if opts.UseAccCardsNum { | |||||
| cond = cond.And(builder.Eq{"resource_specification.acc_cards_num": opts.AccCardsNum}) | |||||
| } | |||||
| if opts.AccCardType != "" { | |||||
| cond = cond.And(builder.Eq{"resource_queue.acc_card_type": opts.AccCardType}) | |||||
| } | |||||
| if opts.UseCpuCores { | |||||
| cond = cond.And(builder.Eq{"resource_specification.cpu_cores": opts.CpuCores}) | |||||
| } | |||||
| if opts.UseMemGiB { | |||||
| cond = cond.And(builder.Eq{"resource_specification.mem_gi_b": opts.MemGiB}) | |||||
| } | |||||
| if opts.UseGPUMemGiB { | |||||
| cond = cond.And(builder.Eq{"resource_specification.gpu_mem_gi_b": opts.GPUMemGiB}) | |||||
| } | |||||
| if opts.UseShareMemGiB { | |||||
| cond = cond.And(builder.Eq{"resource_specification.share_mem_gi_b": opts.ShareMemGiB}) | |||||
| } | |||||
| r := make([]*Specification, 0) | |||||
| s := x.Where(cond). | |||||
| Join("INNER", "resource_queue", "resource_queue.id = resource_specification.queue_id") | |||||
| if !opts.RequestAll { | |||||
| s = s.Join("INNER", "resource_scene_spec", "resource_scene_spec.spec_id = resource_specification.id"). | |||||
| Join("INNER", "resource_scene", "resource_scene_spec.scene_id = resource_scene.id") | |||||
| } | |||||
| err := s.OrderBy("resource_queue.compute_resource asc,resource_queue.acc_card_type asc,resource_specification.acc_cards_num asc,resource_specification.cpu_cores asc,resource_specification.mem_gi_b asc,resource_specification.share_mem_gi_b asc"). | |||||
| Unscoped().Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
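Note: a minimal usage sketch of the query above (not part of the PR). Only fields that are set add WHERE clauses, and the boolean Use* flags gate the numeric filters so that a zero value can still be filtered on explicitly; RequestAll skips the resource_scene join.

    func findNPUSpecsWithOneCard() ([]*Specification, error) {
        return FindSpecs(FindSpecsOptions{
            ComputeResource: "NPU",
            Cluster:         OpenICluster,
            UseAccCardsNum:  true,
            AccCardsNum:     1,
            RequestAll:      true, // list specs regardless of scene binding
        })
    }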
| func InitQueueAndSpec(queue ResourceQueue, spec ResourceSpecification) (*Specification, error) { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| sess.Begin() | |||||
| param := ResourceQueue{ | |||||
| QueueCode: queue.QueueCode, | |||||
| Cluster: queue.Cluster, | |||||
| AiCenterCode: queue.AiCenterCode, | |||||
| ComputeResource: queue.ComputeResource, | |||||
| AccCardType: queue.AccCardType, | |||||
| } | |||||
| _, err := sess.Get(¶m) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return nil, err | |||||
| } | |||||
| if param.ID == 0 { | |||||
| _, err = sess.InsertOne(&queue) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return nil, err | |||||
| } | |||||
| } else { | |||||
| queue = param | |||||
| } | |||||
| spec.QueueId = queue.ID | |||||
| _, err = sess.InsertOne(&spec) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return nil, err | |||||
| } | |||||
| sess.Commit() | |||||
| return BuildSpecification(queue, spec), nil | |||||
| } | |||||
| func BuildSpecification(queue ResourceQueue, spec ResourceSpecification) *Specification { | |||||
| return &Specification{ | |||||
| ID: spec.ID, | |||||
| SourceSpecId: spec.SourceSpecId, | |||||
| AccCardsNum: spec.AccCardsNum, | |||||
| AccCardType: queue.AccCardType, | |||||
| CpuCores: spec.CpuCores, | |||||
| MemGiB: spec.MemGiB, | |||||
| GPUMemGiB: spec.GPUMemGiB, | |||||
| ShareMemGiB: spec.ShareMemGiB, | |||||
| ComputeResource: queue.ComputeResource, | |||||
| UnitPrice: spec.UnitPrice, | |||||
| QueueId: queue.ID, | |||||
| QueueCode: queue.QueueCode, | |||||
| Cluster: queue.Cluster, | |||||
| AiCenterCode: queue.AiCenterCode, | |||||
| AiCenterName: queue.AiCenterName, | |||||
| } | |||||
| } | |||||
| func GetCloudbrainOneAccCardType(queueCode string) string { | |||||
| switch queueCode { | |||||
| case "a100": | |||||
| return "A100" | |||||
| case "openidebug": | |||||
| return "T4" | |||||
| case "openidgx": | |||||
| return "V100" | |||||
| } | |||||
| return "" | |||||
| } | |||||
| var cloudbrainTwoSpecsInitFlag = false | |||||
| var cloudbrainTwoSpecs map[string]*Specification | |||||
| func GetCloudbrainTwoSpecs() (map[string]*Specification, error) { | |||||
| if !cloudbrainTwoSpecsInitFlag { | |||||
| r, err := InitCloudbrainTwoSpecs() | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| cloudbrainTwoSpecsInitFlag = true | |||||
| cloudbrainTwoSpecs = r | |||||
| } | |||||
| return cloudbrainTwoSpecs, nil | |||||
| } | |||||
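Note: the package-level cloudbrainTwoSpecsInitFlag above is not synchronized, so concurrent first calls may both run InitCloudbrainTwoSpecs. A guarded variant, sketched here only for comparison (assumes a "sync" import; not what this PR ships):

    var cloudbrainTwoSpecsMu sync.Mutex

    func getCloudbrainTwoSpecsLocked() (map[string]*Specification, error) {
        cloudbrainTwoSpecsMu.Lock()
        defer cloudbrainTwoSpecsMu.Unlock()
        if cloudbrainTwoSpecs == nil {
            r, err := InitCloudbrainTwoSpecs()
            if err != nil {
                return nil, err // keep the retry-on-failure behaviour of the flag version
            }
            cloudbrainTwoSpecs = r
        }
        return cloudbrainTwoSpecs, nil
    }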
| func InitCloudbrainTwoSpecs() (map[string]*Specification, error) { | |||||
| r := make(map[string]*Specification, 0) | |||||
| queue, err := GetResourceQueue(&ResourceQueue{QueueCode: "openisupport"}) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if queue == nil { | |||||
| queue = &ResourceQueue{ | |||||
| QueueCode: "openisupport", | |||||
| Cluster: OpenICluster, | |||||
| AiCenterCode: AICenterOfCloudBrainTwo, | |||||
| AiCenterName: "云脑二", | |||||
| ComputeResource: NPU, | |||||
| AccCardType: "ASCEND910", | |||||
| Remark: "处理历史云脑任务时自动生成", | |||||
| } | |||||
| _, err = x.InsertOne(queue) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| for i := 1; i <= 8; i = i * 2 { | |||||
| sourceSpecId := "modelarts.bm.910.arm.public." + fmt.Sprint(i) | |||||
| spec, err := GetResourceSpecification(&ResourceSpecification{ | |||||
| SourceSpecId: sourceSpecId, | |||||
| QueueId: queue.ID, | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if spec == nil { | |||||
| spec = &ResourceSpecification{ | |||||
| QueueId: queue.ID, | |||||
| SourceSpecId: sourceSpecId, | |||||
| AccCardsNum: i, | |||||
| CpuCores: i * 24, | |||||
| MemGiB: float32(i * 256), | |||||
| GPUMemGiB: float32(32), | |||||
| Status: SpecOffShelf, | |||||
| IsAvailable: true, | |||||
| } | |||||
| _, err = x.Insert(spec) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| r[sourceSpecId] = BuildSpecification(*queue, *spec) | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| var grampusSpecsInitFlag = false | |||||
| var grampusSpecs map[string]*Specification | |||||
| func GetGrampusSpecs() (map[string]*Specification, error) { | |||||
| if !grampusSpecsInitFlag { | |||||
| specMap := make(map[string]*Specification, 0) | |||||
| r, err := FindSpecs(FindSpecsOptions{ | |||||
| Cluster: C2NetCluster, | |||||
| RequestAll: true, | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| for _, spec := range r { | |||||
| specMap[spec.SourceSpecId] = spec | |||||
| specMap[spec.SourceSpecId+"_"+spec.AiCenterCode] = spec | |||||
| } | |||||
| grampusSpecsInitFlag = true | |||||
| grampusSpecs = specMap | |||||
| } | |||||
| return grampusSpecs, nil | |||||
| } | |||||
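Note: GetGrampusSpecs keys every spec twice, by SourceSpecId alone and by SourceSpecId plus the AI center code. A hypothetical lookup helper showing the intended precedence (centre-specific entry wins; a nil result means the spec is unknown locally):

    func findGrampusSpec(sourceSpecId, aiCenterCode string) (*Specification, error) {
        specs, err := GetGrampusSpecs()
        if err != nil {
            return nil, err
        }
        if s, ok := specs[sourceSpecId+"_"+aiCenterCode]; ok {
            return s, nil
        }
        return specs[sourceSpecId], nil
    }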
| @@ -0,0 +1,79 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| ) | |||||
| const ( | |||||
| RewardAdminLogProcessing = 1 | |||||
| RewardAdminLogSuccess = 2 | |||||
| RewardAdminLogFailed = 3 | |||||
| ) | |||||
| type RewardAdminLog struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| LogId string `xorm:"INDEX NOT NULL"` | |||||
| Amount int64 `xorm:"NOT NULL"` | |||||
| RewardType string | |||||
| Remark string | |||||
| Status int | |||||
| TargetUserId int64 `xorm:"INDEX NOT NULL"` | |||||
| CreatorId int64 `xorm:"NOT NULL"` | |||||
| CreatorName string | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | |||||
| } | |||||
| func (r *RewardAdminLog) ToShow() *RewardAdminLogShow { | |||||
| return &RewardAdminLogShow{ | |||||
| CreatorName: r.CreatorName, | |||||
| } | |||||
| } | |||||
| type RewardAdminLogShow struct { | |||||
| CreatorName string | |||||
| } | |||||
| type AdminLogAndUser struct { | |||||
| AdminRewardAdminLog RewardAdminLog `xorm:"extends"` | |||||
| User User `xorm:"extends"` | |||||
| } | |||||
| func getRewardAdminLog(ra *RewardAdminLog) (*RewardAdminLog, error) { | |||||
| has, err := x.Get(ra) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if !has { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return ra, nil | |||||
| } | |||||
| func InsertRewardAdminLog(ra *RewardAdminLog) (int64, error) { | |||||
| return x.Insert(ra) | |||||
| } | |||||
| func UpdateRewardAdminLogStatus(logId string, oldStatus, newStatus int) error { | |||||
| _, err := x.Where("log_id = ? and status = ?", logId, oldStatus).Update(&RewardAdminLog{Status: newStatus}) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func GetRewardAdminLogByLogIds(logIds []string) ([]*RewardAdminLog, error) { | |||||
| if len(logIds) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| adminLogs := make([]*AdminLogAndUser, 0) | |||||
| err := x.Table("reward_admin_log").Join("LEFT", "user", "reward_admin_log.creator_id = public.user.id").In("reward_admin_log.log_id", logIds).Find(&adminLogs) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| r := make([]*RewardAdminLog, len(adminLogs)) | |||||
| for i, v := range adminLogs { | |||||
| temp := &v.AdminRewardAdminLog | |||||
| temp.CreatorName = v.User.Name | |||||
| r[i] = temp | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| @@ -0,0 +1,459 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "fmt" | |||||
| "strconv" | |||||
| "strings" | |||||
| "xorm.io/builder" | |||||
| ) | |||||
| type SourceType string | |||||
| const ( | |||||
| SourceTypeAccomplishTask SourceType = "ACCOMPLISH_TASK" | |||||
| SourceTypeAdminOperate SourceType = "ADMIN_OPERATE" | |||||
| SourceTypeRunCloudbrainTask SourceType = "RUN_CLOUDBRAIN_TASK" | |||||
| ) | |||||
| func (r SourceType) Name() string { | |||||
| switch r { | |||||
| case SourceTypeAccomplishTask: | |||||
| return "ACCOMPLISH_TASK" | |||||
| case SourceTypeAdminOperate: | |||||
| return "ADMIN_OPERATE" | |||||
| case SourceTypeRunCloudbrainTask: | |||||
| return "RUN_CLOUDBRAIN_TASK" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| type RewardType string | |||||
| const ( | |||||
| RewardTypePoint RewardType = "POINT" | |||||
| ) | |||||
| func (r RewardType) Name() string { | |||||
| switch r { | |||||
| case RewardTypePoint: | |||||
| return "POINT" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| func (r RewardType) Show() string { | |||||
| switch r { | |||||
| case RewardTypePoint: | |||||
| return "积分" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| func GetRewardTypeInstance(s string) RewardType { | |||||
| switch s { | |||||
| case RewardTypePoint.Name(): | |||||
| return RewardTypePoint | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| type RewardOperateType string | |||||
| func (r RewardOperateType) Name() string { | |||||
| switch r { | |||||
| case OperateTypeIncrease: | |||||
| return "INCREASE" | |||||
| case OperateTypeDecrease: | |||||
| return "DECREASE" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| func (r RewardOperateType) Show() string { | |||||
| switch r { | |||||
| case OperateTypeIncrease: | |||||
| return "奖励" | |||||
| case OperateTypeDecrease: | |||||
| return "扣减" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| func GetRewardOperateTypeInstance(s string) RewardOperateType { | |||||
| switch s { | |||||
| case OperateTypeIncrease.Name(): | |||||
| return OperateTypeIncrease | |||||
| case OperateTypeDecrease.Name(): | |||||
| return OperateTypeDecrease | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
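Note: GetRewardOperateTypeInstance maps unknown input to the empty string, which callers can treat as a validity check. A small illustrative wrapper (assumed, not in the PR):

    func parseOperateType(raw string) (RewardOperateType, bool) {
        t := GetRewardOperateTypeInstance(raw)
        return t, t != ""
    }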
| const ( | |||||
| OperateTypeIncrease RewardOperateType = "INCREASE" | |||||
| OperateTypeDecrease RewardOperateType = "DECREASE" | |||||
| OperateTypeNull RewardOperateType = "NIL" | |||||
| ) | |||||
| const ( | |||||
| OperateStatusOperating = "OPERATING" | |||||
| OperateStatusSucceeded = "SUCCEEDED" | |||||
| OperateStatusFailed = "FAILED" | |||||
| ) | |||||
| const Semicolon = ";" | |||||
| type RewardOperateOrderBy string | |||||
| const ( | |||||
| RewardOrderByIDDesc RewardOperateOrderBy = "reward_operate_record.id desc" | |||||
| ) | |||||
| type RewardRecordList []*RewardOperateRecord | |||||
| type RewardRecordShowList []*RewardOperateRecordShow | |||||
| func (l RewardRecordShowList) loadAttribute(isAdmin bool) { | |||||
| l.loadAction() | |||||
| l.loadCloudbrain() | |||||
| if isAdmin { | |||||
| l.loadAdminLog() | |||||
| } | |||||
| } | |||||
| func (l RewardRecordShowList) loadAction() error { | |||||
| if len(l) == 0 { | |||||
| return nil | |||||
| } | |||||
| actionIds := make([]int64, 0) | |||||
| actionIdMap := make(map[int64]*RewardOperateRecordShow, 0) | |||||
| for _, r := range l { | |||||
| if r.SourceType != SourceTypeAccomplishTask.Name() { | |||||
| continue | |||||
| } | |||||
| i, _ := strconv.ParseInt(r.SourceId, 10, 64) | |||||
| actionIds = append(actionIds, i) | |||||
| actionIdMap[i] = r | |||||
| } | |||||
| actions, err := GetActionByIds(actionIds) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| for _, v := range actions { | |||||
| actionIdMap[v.ID].Action = v.ToShow() | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (l RewardRecordShowList) loadCloudbrain() error { | |||||
| if len(l) == 0 { | |||||
| return nil | |||||
| } | |||||
| cloudbrainIds := make([]int64, 0) | |||||
| cloudbrainMap := make(map[int64]*RewardOperateRecordShow, 0) | |||||
| for _, r := range l { | |||||
| if r.SourceType != SourceTypeRunCloudbrainTask.Name() { | |||||
| continue | |||||
| } | |||||
| i, _ := strconv.ParseInt(r.SourceId, 10, 64) | |||||
| cloudbrainIds = append(cloudbrainIds, i) | |||||
| cloudbrainMap[i] = r | |||||
| } | |||||
| cloudbrains, err := GetCloudbrainByIds(cloudbrainIds) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| var repoIds []int64 | |||||
| var taskIds []int64 | |||||
| for _, task := range cloudbrains { | |||||
| repoIds = append(repoIds, task.RepoID) | |||||
| taskIds = append(taskIds, task.ID) | |||||
| } | |||||
| repositoryMap, err := GetRepositoriesMapByIDs(repoIds) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| specMap, err := GetResourceSpecMapByCloudbrainIDs(taskIds) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| for _, v := range cloudbrains { | |||||
| v.Repo = repositoryMap[v.RepoID] | |||||
| v.Spec = specMap[v.ID] | |||||
| cloudbrainMap[v.ID].Cloudbrain = v.ToShow() | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (l RewardRecordShowList) loadAdminLog() error { | |||||
| if len(l) == 0 { | |||||
| return nil | |||||
| } | |||||
| logIds := make([]string, 0) | |||||
| logMap := make(map[string]*RewardOperateRecordShow, 0) | |||||
| for _, r := range l { | |||||
| if r.SourceType != SourceTypeAdminOperate.Name() { | |||||
| continue | |||||
| } | |||||
| logIds = append(logIds, r.SourceId) | |||||
| logMap[r.SourceId] = r | |||||
| } | |||||
| adminLogs, err := GetRewardAdminLogByLogIds(logIds) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| for _, v := range adminLogs { | |||||
| logMap[v.LogId].AdminLog = v.ToShow() | |||||
| } | |||||
| return nil | |||||
| } | |||||
| type RewardOperateRecord struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| SerialNo string `xorm:"INDEX NOT NULL"` | |||||
| UserId int64 `xorm:"INDEX NOT NULL"` | |||||
| Amount int64 `xorm:"NOT NULL"` | |||||
| LossAmount int64 | |||||
| Title string | |||||
| RewardType string `xorm:"NOT NULL"` | |||||
| SourceType string `xorm:"NOT NULL"` | |||||
| SourceId string `xorm:"INDEX NOT NULL"` | |||||
| SourceTemplateId string | |||||
| RequestId string `xorm:"INDEX NOT NULL"` | |||||
| OperateType string `xorm:"NOT NULL"` | |||||
| Status string `xorm:"NOT NULL"` | |||||
| Remark string | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | |||||
| UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` | |||||
| LastOperateUnix timeutil.TimeStamp `xorm:"INDEX"` | |||||
| } | |||||
| type AdminRewardOperateReq struct { | |||||
| TargetUserId int64 `binding:"Required"` | |||||
| OperateType RewardOperateType `binding:"Required"` | |||||
| Amount int64 `binding:"Required;Range(1,100000)"` | |||||
| Remark string | |||||
| RewardType RewardType | |||||
| } | |||||
| type RewardOperateRecordShow struct { | |||||
| SerialNo string | |||||
| Status string | |||||
| OperateType string | |||||
| SourceId string | |||||
| Amount int64 | |||||
| LossAmount int64 | |||||
| BalanceAfter int64 | |||||
| Remark string | |||||
| SourceType string | |||||
| UserName string | |||||
| LastOperateDate timeutil.TimeStamp | |||||
| UnitPrice int64 | |||||
| SuccessCount int | |||||
| Action *ActionShow | |||||
| Cloudbrain *CloudbrainShow | |||||
| AdminLog *RewardAdminLogShow | |||||
| } | |||||
| func getPointOperateRecord(tl *RewardOperateRecord) (*RewardOperateRecord, error) { | |||||
| has, err := x.Get(tl) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if !has { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return tl, nil | |||||
| } | |||||
| func GetPointOperateRecordBySourceTypeAndRequestId(sourceType, requestId, operateType string) (*RewardOperateRecord, error) { | |||||
| t := &RewardOperateRecord{ | |||||
| SourceType: sourceType, | |||||
| RequestId: requestId, | |||||
| OperateType: operateType, | |||||
| } | |||||
| return getPointOperateRecord(t) | |||||
| } | |||||
| func GetPointOperateRecordBySerialNo(serialNo string) (*RewardOperateRecord, error) { | |||||
| t := &RewardOperateRecord{ | |||||
| SerialNo: serialNo, | |||||
| } | |||||
| return getPointOperateRecord(t) | |||||
| } | |||||
| func InsertRewardOperateRecord(tl *RewardOperateRecord) (int64, error) { | |||||
| return x.Insert(tl) | |||||
| } | |||||
| func UpdateRewardRecordToFinalStatus(sourceType, requestId, newStatus string) (int64, error) { | |||||
| r := &RewardOperateRecord{ | |||||
| Status: newStatus, | |||||
| LastOperateUnix: timeutil.TimeStampNow(), | |||||
| } | |||||
| return x.Cols("status", "last_operate_unix").Where("source_type=? and request_id=? and status=?", sourceType, requestId, OperateStatusOperating).Update(r) | |||||
| } | |||||
| func SumRewardAmountInTaskPeriod(rewardType string, sourceType string, userId int64, period *PeriodResult) (int64, error) { | |||||
| var cond = builder.NewCond() | |||||
| if period != nil { | |||||
| cond = cond.And(builder.Gte{"created_unix": period.StartTime.Unix()}) | |||||
| cond = cond.And(builder.Lt{"created_unix": period.EndTime.Unix()}) | |||||
| } | |||||
| if sourceType != "" { | |||||
| cond = cond.And(builder.Eq{"source_type": sourceType}) | |||||
| } | |||||
| cond = cond.And(builder.Eq{"reward_type": rewardType}) | |||||
| cond = cond.And(builder.Eq{"user_id": userId}) | |||||
| return x.Where(cond).SumInt(&RewardOperateRecord{}, "amount") | |||||
| } | |||||
| type RewardOperateContext struct { | |||||
| SourceType SourceType | |||||
| SourceId string | |||||
| SourceTemplateId string | |||||
| Title string | |||||
| Remark string | |||||
| Reward Reward | |||||
| TargetUserId int64 | |||||
| RequestId string | |||||
| OperateType RewardOperateType | |||||
| RejectPolicy LimiterRejectPolicy | |||||
| PermittedNegative bool | |||||
| LossAmount int64 | |||||
| } | |||||
| type Reward struct { | |||||
| Amount int64 | |||||
| Type RewardType | |||||
| } | |||||
| type UserRewardOperationRedis struct { | |||||
| UserId int64 | |||||
| Amount int64 | |||||
| RewardType RewardType | |||||
| OperateType RewardOperateType | |||||
| } | |||||
| type UserRewardOperation struct { | |||||
| UserId int64 | |||||
| Msg string | |||||
| } | |||||
| func AppendRemark(remark, appendStr string) string { | |||||
| return strings.TrimPrefix(remark+Semicolon+appendStr, Semicolon) | |||||
| } | |||||
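Note: AppendRemark joins with ";" and trims the leading separator when the existing remark is empty. For reference (illustrative values only, not from the PR; fmt is already imported in this file):

    func ExampleAppendRemark() {
        fmt.Println(AppendRemark("", "deducted by periodic task"))       // deducted by periodic task
        fmt.Println(AppendRemark("manual", "deducted by periodic task")) // manual;deducted by periodic task
    }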
| type RewardRecordListOpts struct { | |||||
| ListOptions | |||||
| UserId int64 | |||||
| UserName string | |||||
| OperateType RewardOperateType | |||||
| RewardType RewardType | |||||
| SourceType string | |||||
| ActionType int | |||||
| SerialNo string | |||||
| OrderBy RewardOperateOrderBy | |||||
| IsAdmin bool | |||||
| Status string | |||||
| } | |||||
| func (opts *RewardRecordListOpts) toCond() builder.Cond { | |||||
| if opts.Page <= 0 { | |||||
| opts.Page = 1 | |||||
| } | |||||
| if len(opts.OrderBy) == 0 { | |||||
| opts.OrderBy = RewardOrderByIDDesc | |||||
| } | |||||
| cond := builder.NewCond() | |||||
| if opts.UserId > 0 { | |||||
| cond = cond.And(builder.Eq{"reward_operate_record.user_id": opts.UserId}) | |||||
| } | |||||
| if opts.OperateType != OperateTypeNull { | |||||
| cond = cond.And(builder.Eq{"reward_operate_record.operate_type": opts.OperateType.Name()}) | |||||
| } | |||||
| if opts.SourceType != "" { | |||||
| cond = cond.And(builder.Eq{"reward_operate_record.source_type": opts.SourceType}) | |||||
| } | |||||
| if opts.ActionType > 0 { | |||||
| cond = cond.And(builder.Eq{"reward_operate_record.source_template_id": fmt.Sprint(opts.ActionType)}) | |||||
| } | |||||
| if opts.SerialNo != "" { | |||||
| cond = cond.And(builder.Like{"reward_operate_record.serial_no", opts.SerialNo}) | |||||
| } | |||||
| if opts.Status != "" { | |||||
| cond = cond.And(builder.Like{"reward_operate_record.status", opts.Status}) | |||||
| } | |||||
| cond = cond.And(builder.Eq{"reward_operate_record.reward_type": opts.RewardType.Name()}) | |||||
| cond = cond.And(builder.Gt{"reward_operate_record.amount": 0}) | |||||
| return cond | |||||
| } | |||||
| type TestTT struct { | |||||
| SerialNo string | |||||
| UserId int64 | |||||
| Amount int64 | |||||
| UserName string | |||||
| } | |||||
| func GetRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowList, int64, error) { | |||||
| cond := opts.toCond() | |||||
| count, err := x.Where(cond).Count(&RewardOperateRecord{}) | |||||
| if err != nil { | |||||
| return nil, 0, err | |||||
| } | |||||
| r := make([]*RewardOperateRecordShow, 0) | |||||
| err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no", | |||||
| "reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount", | |||||
| "reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type", | |||||
| "reward_operate_record.last_operate_unix as last_operate_date"). | |||||
| Where(cond).Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).OrderBy(string(opts.OrderBy)).Find(&r) | |||||
| if err != nil { | |||||
| return nil, 0, err | |||||
| } | |||||
| RewardRecordShowList(r).loadAttribute(false) | |||||
| return r, count, nil | |||||
| } | |||||
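Note: a hypothetical caller of the list API above, fetching a user's point gains newest first. Field names come from RewardRecordListOpts; the embedded ListOptions paging is assumed to behave as elsewhere in the codebase.

    func listUserPointIncreases(userId int64, page, pageSize int) (RewardRecordShowList, int64, error) {
        return GetRewardRecordShowList(&RewardRecordListOpts{
            ListOptions: ListOptions{Page: page, PageSize: pageSize},
            UserId:      userId,
            OperateType: OperateTypeIncrease,
            RewardType:  RewardTypePoint,
            OrderBy:     RewardOrderByIDDesc,
        })
    }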
| func GetAdminRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowList, int64, error) { | |||||
| cond := opts.toCond() | |||||
| count, err := x.Where(cond).Count(&RewardOperateRecord{}) | |||||
| if err != nil { | |||||
| return nil, 0, err | |||||
| } | |||||
| r := make([]*RewardOperateRecordShow, 0) | |||||
| switch opts.OperateType { | |||||
| case OperateTypeIncrease: | |||||
| err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no", | |||||
| "reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount", | |||||
| "reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type", | |||||
| "reward_operate_record.last_operate_unix as last_operate_date", "public.user.name as user_name", | |||||
| "point_account_log.balance_after"). | |||||
| Join("LEFT", "public.user", "reward_operate_record.user_id = public.user.id"). | |||||
| Join("LEFT", "point_account_log", " reward_operate_record.serial_no = point_account_log.source_id"). | |||||
| Where(cond).Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).OrderBy(string(opts.OrderBy)).Find(&r) | |||||
| case OperateTypeDecrease: | |||||
| err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no", | |||||
| "reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount", | |||||
| "reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type", | |||||
| "reward_operate_record.last_operate_unix as last_operate_date", "public.user.name as user_name", | |||||
| "reward_periodic_task.amount as unit_price", "reward_periodic_task.success_count"). | |||||
| Join("LEFT", "public.user", "reward_operate_record.user_id = public.user.id"). | |||||
| Join("LEFT", "reward_periodic_task", "reward_operate_record.serial_no = reward_periodic_task.operate_serial_no"). | |||||
| Where(cond).Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).OrderBy(string(opts.OrderBy)).Find(&r) | |||||
| } | |||||
| if err != nil { | |||||
| return nil, 0, err | |||||
| } | |||||
| RewardRecordShowList(r).loadAttribute(true) | |||||
| return r, count, nil | |||||
| } | |||||
| @@ -0,0 +1,115 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "time" | |||||
| ) | |||||
| type PeriodicTaskStatus int | |||||
| const ( | |||||
| PeriodicTaskStatusRunning = iota + 1 // 1 | |||||
| PeriodicTaskStatusFinished // 2 | |||||
| ) | |||||
| type PeriodType string | |||||
| const ( | |||||
| PeriodType30MinutesFree1HourCost PeriodType = "30MF1HC" | |||||
| ) | |||||
| func (r PeriodType) Name() string { | |||||
| switch r { | |||||
| case PeriodType30MinutesFree1HourCost: | |||||
| return "30MF1HC" | |||||
| default: | |||||
| return "" | |||||
| } | |||||
| } | |||||
| type RewardPeriodicTask struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| OperateSerialNo string `xorm:"INDEX NOT NULL"` | |||||
| DelaySeconds int64 | |||||
| IntervalSeconds int64 | |||||
| Amount int64 `xorm:"NOT NULL"` | |||||
| NextExecuteTime timeutil.TimeStamp `xorm:"INDEX NOT NULL"` | |||||
| SuccessCount int `xorm:"NOT NULL default 0"` | |||||
| Status int `xorm:"NOT NULL"` | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | |||||
| FinishedUnix timeutil.TimeStamp `xorm:"INDEX"` | |||||
| UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` | |||||
| } | |||||
| type StartPeriodicTaskOpts struct { | |||||
| SourceType SourceType | |||||
| SourceId string | |||||
| Remark string | |||||
| Title string | |||||
| TargetUserId int64 | |||||
| RequestId string | |||||
| OperateType RewardOperateType | |||||
| Delay time.Duration | |||||
| Interval time.Duration | |||||
| UnitAmount int | |||||
| RewardType RewardType | |||||
| StartTime time.Time | |||||
| } | |||||
| func InsertPeriodicTask(tl *RewardPeriodicTask) (int64, error) { | |||||
| return x.Insert(tl) | |||||
| } | |||||
| func GetRunningRewardTask(now time.Time) ([]RewardPeriodicTask, error) { | |||||
| r := make([]RewardPeriodicTask, 0) | |||||
| err := x.Where("next_execute_time <= ? and status = ?", now.Unix(), PeriodicTaskStatusRunning).Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return r, err | |||||
| } | |||||
| func IncrRewardTaskSuccessCount(t RewardPeriodicTask, count int64, nextTime timeutil.TimeStamp) error { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| if err := sess.Begin(); err != nil { | |||||
| return err | |||||
| } | |||||
| _, err := sess.Exec("update reward_periodic_task set success_count = success_count + ? , next_execute_time = ?, updated_unix = ? where id = ?", count, nextTime, timeutil.TimeStampNow(), t.ID) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| _, err = sess.Exec("update reward_operate_record set amount = amount + ? ,updated_unix = ? ,last_operate_unix = ? where serial_no = ?", t.Amount, timeutil.TimeStampNow(), timeutil.TimeStampNow(), t.OperateSerialNo) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| sess.Commit() | |||||
| return nil | |||||
| } | |||||
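Note: a sketch of the consumer side that would drive these helpers on a timer; the real cron wiring lives outside this file and the function name here is an assumption. Both time and timeutil are already imported above.

    func processDueRewardTasks(now time.Time) error {
        tasks, err := GetRunningRewardTask(now)
        if err != nil {
            return err
        }
        for _, t := range tasks {
            // schedule the next tick one interval after the current run
            next := timeutil.TimeStamp(now.Add(time.Duration(t.IntervalSeconds) * time.Second).Unix())
            if err := IncrRewardTaskSuccessCount(t, 1, next); err != nil {
                return err
            }
        }
        return nil
    }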
| func GetPeriodicTaskBySourceIdAndType(sourceType SourceType, sourceId string, operateType RewardOperateType) (*RewardPeriodicTask, error) { | |||||
| r := RewardPeriodicTask{} | |||||
| _, err := x.SQL("select rpt.* from reward_periodic_task rpt "+ | |||||
| "inner join reward_operate_record ror on rpt.operate_serial_no = ror.serial_no"+ | |||||
| " where ror.source_type = ? and ror.source_id = ? and ror.operate_type = ? ", sourceType.Name(), sourceId, operateType.Name()).Get(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return &r, nil | |||||
| } | |||||
| func StopPeriodicTask(taskId int64, operateSerialNo string, stopTime time.Time) error { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| if err := sess.Begin(); err != nil { | |||||
| return err | |||||
| } | |||||
| _, err := sess.Where("id = ? and status = ?", taskId, PeriodicTaskStatusRunning).Update(&RewardPeriodicTask{Status: PeriodicTaskStatusFinished, FinishedUnix: timeutil.TimeStamp(stopTime.Unix())}) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| _, err = sess.Where("serial_no = ? and status = ?", operateSerialNo, OperateStatusOperating).Update(&RewardOperateRecord{Status: OperateStatusSucceeded}) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| sess.Commit() | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,44 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "time" | |||||
| ) | |||||
| type TaskAccomplishLog struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| ConfigId int64 `xorm:"NOT NULL"` | |||||
| TaskCode string `xorm:"NOT NULL"` | |||||
| UserId int64 `xorm:"INDEX NOT NULL"` | |||||
| ActionId int64 | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` | |||||
| } | |||||
| type PeriodResult struct { | |||||
| StartTime time.Time | |||||
| EndTime time.Time | |||||
| LeftTime time.Duration | |||||
| } | |||||
| func getTaskAccomplishLog(tl *TaskAccomplishLog) (*TaskAccomplishLog, error) { | |||||
| has, err := x.Get(tl) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if !has { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return tl, nil | |||||
| } | |||||
| func CountTaskAccomplishLogInTaskPeriod(taskCode string, userId int64, period *PeriodResult) (int64, error) { | |||||
| if period == nil { | |||||
| return x.Where("task_code = ? and user_id = ?", taskCode, userId).Count(&TaskAccomplishLog{}) | |||||
| } else { | |||||
| return x.Where("task_code = ? and user_id = ? and created_unix >= ? and created_unix < ? ", taskCode, userId, period.StartTime.Unix(), period.EndTime.Unix()).Count(&TaskAccomplishLog{}) | |||||
| } | |||||
| } | |||||
| func InsertTaskAccomplishLog(tl *TaskAccomplishLog) (int64, error) { | |||||
| return x.Insert(tl) | |||||
| } | |||||
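Note: a hypothetical guard built on the counter above; limitNum would come from the task's LimitConfig.

    // underTaskLimit reports whether the user can still be rewarded for taskCode
    // inside the given period (a nil period means "count everything").
    func underTaskLimit(taskCode string, userId int64, limitNum int64, period *PeriodResult) (bool, error) {
        n, err := CountTaskAccomplishLogInTaskPeriod(taskCode, userId, period)
        if err != nil {
            return false, err
        }
        return n < limitNum, nil
    }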
| @@ -0,0 +1,302 @@ | |||||
| package models | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "fmt" | |||||
| "xorm.io/builder" | |||||
| ) | |||||
| const ( | |||||
| PeriodNotCycle = "NOT_CYCLE" | |||||
| PeriodDaily = "DAILY" | |||||
| ) | |||||
| //TaskConfig only supports add and delete; editing an existing config is not allowed. | |||||
| //To change the config for a task code, delete the old record first and add a new one. | |||||
| type TaskConfig struct { | |||||
| ID int64 `xorm:"pk autoincr"` | |||||
| TaskCode string `xorm:"NOT NULL"` | |||||
| Title string | |||||
| AwardType string `xorm:"NOT NULL"` | |||||
| AwardAmount int64 `xorm:"NOT NULL"` | |||||
| CreatorId int64 `xorm:"NOT NULL"` | |||||
| CreatorName string | |||||
| CreatedUnix timeutil.TimeStamp `xorm:"created"` | |||||
| DeletedAt timeutil.TimeStamp `xorm:"deleted"` | |||||
| DeleterId int64 | |||||
| DeleterName string | |||||
| } | |||||
| type TaskConfigWithLimit struct { | |||||
| ID int64 | |||||
| TaskCode string | |||||
| Title string | |||||
| AwardType string | |||||
| AwardAmount int64 | |||||
| Creator string | |||||
| IsDeleted bool | |||||
| CreatedUnix timeutil.TimeStamp | |||||
| DeleteAt timeutil.TimeStamp | |||||
| Limiters []*LimitConfigVO | |||||
| } | |||||
| type TaskConfigWithLimitResponse struct { | |||||
| Records []*TaskConfigWithSingleLimit | |||||
| Total int64 | |||||
| PageSize int | |||||
| Page int | |||||
| } | |||||
| type TaskConfigWithSingleLimit struct { | |||||
| ID int64 | |||||
| TaskCode string | |||||
| AwardType string | |||||
| AwardAmount int64 | |||||
| Creator string | |||||
| IsDeleted bool | |||||
| CreatedUnix timeutil.TimeStamp | |||||
| DeleteAt timeutil.TimeStamp | |||||
| RefreshRate string | |||||
| LimitNum int64 | |||||
| } | |||||
| type TaskAndLimiterConfig struct { | |||||
| TaskConfig TaskConfig `xorm:"extends"` | |||||
| LimitConfig LimitConfig `xorm:"extends"` | |||||
| } | |||||
| func (TaskAndLimiterConfig) TableName() string { | |||||
| return "task_config" | |||||
| } | |||||
| type BatchLimitConfigVO struct { | |||||
| ConfigList []TaskConfigWithLimit | |||||
| } | |||||
| func getTaskConfig(t *TaskConfig) (*TaskConfig, error) { | |||||
| has, err := x.Get(t) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } else if !has { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return t, nil | |||||
| } | |||||
| func GetTaskConfigByTaskCode(taskCode string) (*TaskConfig, error) { | |||||
| t := &TaskConfig{ | |||||
| TaskCode: taskCode, | |||||
| } | |||||
| return getTaskConfig(t) | |||||
| } | |||||
| func GetTaskConfigByID(id int64) (*TaskConfig, error) { | |||||
| t := &TaskConfig{ | |||||
| ID: id, | |||||
| } | |||||
| return getTaskConfig(t) | |||||
| } | |||||
| func GetTaskConfigList() ([]*TaskConfig, error) { | |||||
| r := make([]*TaskConfig, 0) | |||||
| err := x.Find(&r) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if len(r) == 0 { | |||||
| return nil, ErrRecordNotExist{} | |||||
| } | |||||
| return r, nil | |||||
| } | |||||
| type GetTaskConfigOpts struct { | |||||
| ListOptions | |||||
| Status int //1 normal 2 deleted | |||||
| ActionType int | |||||
| } | |||||
| func GetTaskConfigPageWithDeleted(opt GetTaskConfigOpts) ([]*TaskAndLimiterConfig, int64, error) { | |||||
| if opt.Page <= 0 { | |||||
| opt.Page = 1 | |||||
| } | |||||
| cond := builder.NewCond() | |||||
| if opt.ActionType > 0 { | |||||
| cond = cond.And(builder.Eq{"task_code": fmt.Sprint(opt.ActionType)}) | |||||
| } | |||||
| var count int64 | |||||
| var err error | |||||
| if opt.Status == 1 { | |||||
| subCond := builder.NewCond() | |||||
| subCond = subCond.Or(builder.IsNull{"task_config.deleted_at"}) | |||||
| subCond = subCond.Or(builder.Eq{"task_config.deleted_at": 0}) | |||||
| cond = cond.And(subCond) | |||||
| } else if opt.Status == 2 { | |||||
| cond = cond.And(builder.Gt{"task_config.deleted_at": 0}) | |||||
| } | |||||
| count, err = x.Unscoped().Where(cond).Count(&TaskConfig{}) | |||||
| if err != nil { | |||||
| return nil, 0, err | |||||
| } | |||||
| r := make([]*TaskAndLimiterConfig, 0) | |||||
| err = x.Join("LEFT", "limit_config", "task_config.id = limit_config.related_id"). | |||||
| Unscoped().Where(cond).Limit(opt.PageSize, (opt.Page-1)*opt.PageSize). | |||||
| OrderBy("task_config.deleted_at desc,task_config.id desc").Find(&r) | |||||
| if err != nil { | |||||
| return nil, 0, err | |||||
| } | |||||
| if len(r) == 0 { | |||||
| return nil, 0, ErrRecordNotExist{} | |||||
| } | |||||
| return r, count, nil | |||||
| } | |||||
| func EditTaskConfig(config TaskConfigWithLimit, doer *User) error { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| if err := sess.Begin(); err != nil { | |||||
| return err | |||||
| } | |||||
| //delete old task config | |||||
| p := &TaskConfig{ | |||||
| ID: config.ID, | |||||
| } | |||||
| _, err := sess.Delete(p) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| //update deleter | |||||
| p.DeleterId = doer.ID | |||||
| p.DeleterName = doer.Name | |||||
| sess.Where("id = ?", config.ID).Unscoped().Update(p) | |||||
| //add new config | |||||
| t := &TaskConfig{ | |||||
| TaskCode: config.TaskCode, | |||||
| Title: config.Title, | |||||
| AwardType: config.AwardType, | |||||
| AwardAmount: config.AwardAmount, | |||||
| CreatorId: doer.ID, | |||||
| CreatorName: doer.Name, | |||||
| } | |||||
| _, err = sess.InsertOne(t) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| //delete old limiter config | |||||
| lp := &LimitConfig{ | |||||
| RelatedId: config.ID, | |||||
| } | |||||
| _, err = sess.Delete(lp) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| lp.DeleterName = doer.Name | |||||
| lp.DeleterId = doer.ID | |||||
| //update deleter | |||||
| sess.Where("related_id = ?", config.ID).Unscoped().Update(lp) | |||||
| //add new limiter config | |||||
| if config.Limiters != nil && len(config.Limiters) > 0 { | |||||
| for _, v := range config.Limiters { | |||||
| //add new config | |||||
| l := &LimitConfig{ | |||||
| Title: v.Title, | |||||
| RefreshRate: v.RefreshRate, | |||||
| Scope: v.Scope, | |||||
| LimitNum: v.LimitNum, | |||||
| LimitCode: config.TaskCode, | |||||
| LimitType: LimitTypeTask.Name(), | |||||
| CreatorId: doer.ID, | |||||
| CreatorName: doer.Name, | |||||
| RelatedId: t.ID, | |||||
| } | |||||
| _, err = sess.Insert(l) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| } | |||||
| } | |||||
| sess.Commit() | |||||
| return nil | |||||
| } | |||||
| func NewTaskConfig(config TaskConfigWithLimit, doer *User) error { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| if err := sess.Begin(); err != nil { | |||||
| return err | |||||
| } | |||||
| //add new config | |||||
| t := &TaskConfig{ | |||||
| TaskCode: config.TaskCode, | |||||
| Title: config.Title, | |||||
| AwardType: config.AwardType, | |||||
| AwardAmount: config.AwardAmount, | |||||
| CreatorId: doer.ID, | |||||
| CreatorName: doer.Name, | |||||
| } | |||||
| _, err := sess.InsertOne(t) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| //add new limiter config | |||||
| if config.Limiters != nil && len(config.Limiters) > 0 { | |||||
| for _, v := range config.Limiters { | |||||
| //add new config | |||||
| l := &LimitConfig{ | |||||
| RelatedId: t.ID, | |||||
| Title: v.Title, | |||||
| RefreshRate: v.RefreshRate, | |||||
| Scope: v.Scope, | |||||
| LimitNum: v.LimitNum, | |||||
| LimitCode: config.TaskCode, | |||||
| LimitType: LimitTypeTask.Name(), | |||||
| CreatorId: doer.ID, | |||||
| CreatorName: doer.Name, | |||||
| } | |||||
| _, err = sess.Insert(l) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| } | |||||
| } | |||||
| sess.Commit() | |||||
| return nil | |||||
| } | |||||
| func DelTaskConfig(id int64, doer *User) error { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| if err := sess.Begin(); err != nil { | |||||
| return err | |||||
| } | |||||
| //delete old task config | |||||
| p := &TaskConfig{ | |||||
| ID: id, | |||||
| } | |||||
| _, err := sess.Delete(p) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| //update deleter | |||||
| p.DeleterId = doer.ID | |||||
| p.DeleterName = doer.Name | |||||
| sess.Where("id = ?", id).Unscoped().Update(p) | |||||
| //delete old limiter config | |||||
| lp := &LimitConfig{ | |||||
| RelatedId: id, | |||||
| } | |||||
| _, err = sess.Delete(lp) | |||||
| if err != nil { | |||||
| sess.Rollback() | |||||
| return err | |||||
| } | |||||
| lp.DeleterName = doer.Name | |||||
| lp.DeleterId = doer.ID | |||||
| //update deleter | |||||
| sess.Where("related_id = ?", id).Unscoped().Update(lp) | |||||
| sess.Commit() | |||||
| return nil | |||||
| } | |||||
| @@ -188,6 +188,10 @@ type User struct { | |||||
| PhoneNumber string `xorm:"VARCHAR(255)"` | PhoneNumber string `xorm:"VARCHAR(255)"` | ||||
| } | } | ||||
| type UserShow struct { | |||||
| Name string | |||||
| } | |||||
| // SearchOrganizationsOptions options to filter organizations | // SearchOrganizationsOptions options to filter organizations | ||||
| type SearchOrganizationsOptions struct { | type SearchOrganizationsOptions struct { | ||||
| ListOptions | ListOptions | ||||
| @@ -96,3 +96,7 @@ func UnbindWechatOpenId(userId int64, oldWechatOpenID string) error { | |||||
| sess.Insert(logParam) | sess.Insert(logParam) | ||||
| return sess.Commit() | return sess.Commit() | ||||
| } | } | ||||
| func CountWechatBindLog(wechatOpenId string, action WechatBindAction) (int64, error) { | |||||
| return x.Where("wechat_open_id = ? and action = ?", action, wechatOpenId).Count(&WechatBindLog{}) | |||||
| } | |||||
| @@ -24,6 +24,7 @@ type CreateCloudBrainForm struct { | |||||
| Params string `form:"run_para_list"` | Params string `form:"run_para_list"` | ||||
| BranchName string `form:"branch_name"` | BranchName string `form:"branch_name"` | ||||
| DatasetName string `form:"dataset_name"` | DatasetName string `form:"dataset_name"` | ||||
| SpecId int64 `form:"spec_id"` | |||||
| } | } | ||||
| type CommitImageCloudBrainForm struct { | type CommitImageCloudBrainForm struct { | ||||
| @@ -72,6 +73,7 @@ type CreateCloudBrainInferencForm struct { | |||||
| CkptName string `form:"ckpt_name" binding:"Required"` | CkptName string `form:"ckpt_name" binding:"Required"` | ||||
| LabelName string `form:"label_names" binding:"Required"` | LabelName string `form:"label_names" binding:"Required"` | ||||
| DatasetName string `form:"dataset_name"` | DatasetName string `form:"dataset_name"` | ||||
| SpecId int64 `form:"spec_id"` | |||||
| } | } | ||||
| func (f *CreateCloudBrainForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | func (f *CreateCloudBrainForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | ||||
| @@ -11,15 +11,14 @@ type CreateGrampusTrainJobForm struct { | |||||
| Attachment string `form:"attachment" binding:"Required"` | Attachment string `form:"attachment" binding:"Required"` | ||||
| BootFile string `form:"boot_file" binding:"Required"` | BootFile string `form:"boot_file" binding:"Required"` | ||||
| ImageID string `form:"image_id" binding:"Required"` | ImageID string `form:"image_id" binding:"Required"` | ||||
| FlavorID string `form:"flavor" binding:"Required"` | |||||
| Params string `form:"run_para_list" binding:"Required"` | Params string `form:"run_para_list" binding:"Required"` | ||||
| Description string `form:"description"` | Description string `form:"description"` | ||||
| BranchName string `form:"branch_name" binding:"Required"` | BranchName string `form:"branch_name" binding:"Required"` | ||||
| FlavorName string `form:"flavor_name" binding:"Required"` | |||||
| EngineName string `form:"engine_name" binding:"Required"` | EngineName string `form:"engine_name" binding:"Required"` | ||||
| WorkServerNumber int `form:"work_server_number" binding:"Required"` | WorkServerNumber int `form:"work_server_number" binding:"Required"` | ||||
| Image string `form:"image"` | Image string `form:"image"` | ||||
| DatasetName string `form:"dataset_name"` | DatasetName string `form:"dataset_name"` | ||||
| SpecId int64 `form:"spec_id"` | |||||
| } | } | ||||
| func (f *CreateGrampusTrainJobForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | func (f *CreateGrampusTrainJobForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | ||||
| @@ -22,6 +22,7 @@ type CreateModelArtsNotebookForm struct { | |||||
| Description string `form:"description"` | Description string `form:"description"` | ||||
| Flavor string `form:"flavor" binding:"Required"` | Flavor string `form:"flavor" binding:"Required"` | ||||
| ImageId string `form:"image_id" binding:"Required"` | ImageId string `form:"image_id" binding:"Required"` | ||||
| SpecId int64 `form:"spec_id" binding:"Required"` | |||||
| } | } | ||||
| func (f *CreateModelArtsNotebookForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | func (f *CreateModelArtsNotebookForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | ||||
| @@ -46,6 +47,7 @@ type CreateModelArtsTrainJobForm struct { | |||||
| VersionName string `form:"version_name" binding:"Required"` | VersionName string `form:"version_name" binding:"Required"` | ||||
| FlavorName string `form:"flaver_names" binding:"Required"` | FlavorName string `form:"flaver_names" binding:"Required"` | ||||
| EngineName string `form:"engine_names" binding:"Required"` | EngineName string `form:"engine_names" binding:"Required"` | ||||
| SpecId int64 `form:"spec_id" binding:"Required"` | |||||
| } | } | ||||
| type CreateModelArtsInferenceJobForm struct { | type CreateModelArtsInferenceJobForm struct { | ||||
| @@ -71,6 +73,7 @@ type CreateModelArtsInferenceJobForm struct { | |||||
| ModelName string `form:"model_name" binding:"Required"` | ModelName string `form:"model_name" binding:"Required"` | ||||
| ModelVersion string `form:"model_version" binding:"Required"` | ModelVersion string `form:"model_version" binding:"Required"` | ||||
| CkptName string `form:"ckpt_name" binding:"Required"` | CkptName string `form:"ckpt_name" binding:"Required"` | ||||
| SpecId int64 `form:"spec_id" binding:"Required"` | |||||
| } | } | ||||
| func (f *CreateModelArtsTrainJobForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | func (f *CreateModelArtsTrainJobForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | ||||
| @@ -7,14 +7,12 @@ import ( | |||||
| "time" | "time" | ||||
| ) | ) | ||||
| const EMPTY_REDIS_VAL = "Nil" | |||||
| var accessTokenLock = redis_lock.NewDistributeLock(redis_key.AccessTokenLockKey()) | var accessTokenLock = redis_lock.NewDistributeLock(redis_key.AccessTokenLockKey()) | ||||
| func GetWechatAccessToken() string { | func GetWechatAccessToken() string { | ||||
| token, _ := redis_client.Get(redis_key.WechatAccessTokenKey()) | token, _ := redis_client.Get(redis_key.WechatAccessTokenKey()) | ||||
| if token != "" { | if token != "" { | ||||
| if token == EMPTY_REDIS_VAL { | |||||
| if token == redis_key.EMPTY_REDIS_VAL { | |||||
| return "" | return "" | ||||
| } | } | ||||
| live, _ := redis_client.TTL(redis_key.WechatAccessTokenKey()) | live, _ := redis_client.TTL(redis_key.WechatAccessTokenKey()) | ||||
| @@ -28,18 +26,22 @@ func GetWechatAccessToken() string { | |||||
| } | } | ||||
| func refreshAccessToken() { | func refreshAccessToken() { | ||||
| if ok := accessTokenLock.Lock(3 * time.Second); ok { | |||||
| if ok, _ := accessTokenLock.Lock(3 * time.Second); ok { | |||||
| defer accessTokenLock.UnLock() | defer accessTokenLock.UnLock() | ||||
| callAccessTokenAndUpdateCache() | callAccessTokenAndUpdateCache() | ||||
| } | } | ||||
| } | } | ||||
| func refreshAndGetAccessToken() string { | func refreshAndGetAccessToken() string { | ||||
| if ok := accessTokenLock.LockWithWait(3*time.Second, 3*time.Second); ok { | |||||
| isOk, err := accessTokenLock.LockWithWait(3*time.Second, 3*time.Second) | |||||
| if err != nil { | |||||
| return "" | |||||
| } | |||||
| if isOk { | |||||
| defer accessTokenLock.UnLock() | defer accessTokenLock.UnLock() | ||||
| token, _ := redis_client.Get(redis_key.WechatAccessTokenKey()) | token, _ := redis_client.Get(redis_key.WechatAccessTokenKey()) | ||||
| if token != "" { | if token != "" { | ||||
| if token == EMPTY_REDIS_VAL { | |||||
| if token == redis_key.EMPTY_REDIS_VAL { | |||||
| return "" | return "" | ||||
| } | } | ||||
| return token | return token | ||||
| @@ -59,7 +61,7 @@ func callAccessTokenAndUpdateCache() string { | |||||
| } | } | ||||
| if token == "" { | if token == "" { | ||||
| redis_client.Setex(redis_key.WechatAccessTokenKey(), EMPTY_REDIS_VAL, 10*time.Second) | |||||
| redis_client.Setex(redis_key.WechatAccessTokenKey(), redis_key.EMPTY_REDIS_VAL, 10*time.Second) | |||||
| return "" | return "" | ||||
| } | } | ||||
| redis_client.Setex(redis_key.WechatAccessTokenKey(), token, time.Duration(r.Expires_in)*time.Second) | redis_client.Setex(redis_key.WechatAccessTokenKey(), token, time.Duration(r.Expires_in)*time.Second) | ||||
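Note: the hunks above negative-cache a failed token fetch for a short TTL under the shared redis_key.EMPTY_REDIS_VAL sentinel. The general shape of that pattern, sketched against the same Get/Setex helpers (the wrapper itself is hypothetical):

    // getOrFetch caches a successful fetch for ttl and a failed fetch as the
    // EMPTY_REDIS_VAL sentinel for missTTL, so misses don't hammer the backend.
    func getOrFetch(key string, ttl, missTTL time.Duration, fetch func() string) string {
        if v, _ := redis_client.Get(key); v != "" {
            if v == redis_key.EMPTY_REDIS_VAL {
                return ""
            }
            return v
        }
        v := fetch()
        if v == "" {
            redis_client.Setex(key, redis_key.EMPTY_REDIS_VAL, missTTL)
            return ""
        }
        redis_client.Setex(key, v, ttl)
        return v
    }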
| @@ -38,7 +38,7 @@ func (err WechatBindError) Error() string { | |||||
| } | } | ||||
| func BindWechat(userId int64, wechatOpenId string) error { | func BindWechat(userId int64, wechatOpenId string) error { | ||||
| if !IsWechatAccountAvailable(userId, wechatOpenId) { | |||||
| if !IsWechatAccountUsed(userId, wechatOpenId) { | |||||
| log.Error("bind wechat failed, because user use wrong wechat account to bind,userId=%d wechatOpenId=%s", userId, wechatOpenId) | log.Error("bind wechat failed, because user use wrong wechat account to bind,userId=%d wechatOpenId=%s", userId, wechatOpenId) | ||||
| return NewWechatBindError(BIND_REPLY_WECHAT_ACCOUNT_USED) | return NewWechatBindError(BIND_REPLY_WECHAT_ACCOUNT_USED) | ||||
| } | } | ||||
| @@ -60,9 +60,9 @@ func IsUserAvailableForWechatBind(userId int64, wechatOpenId string) bool { | |||||
| return currentOpenId == "" || currentOpenId == wechatOpenId | return currentOpenId == "" || currentOpenId == wechatOpenId | ||||
| } | } | ||||
| //IsWechatAccountAvailable if wechat account used by another account,return false | |||||
| //IsWechatAccountUsed if wechat account used by another account,return false | |||||
| //if wechat account not used or used by the given user,return true | //if wechat account not used or used by the given user,return true | ||||
| func IsWechatAccountAvailable(userId int64, wechatOpenId string) bool { | |||||
| func IsWechatAccountUsed(userId int64, wechatOpenId string) bool { | |||||
| user := models.GetUserByWechatOpenId(wechatOpenId) | user := models.GetUserByWechatOpenId(wechatOpenId) | ||||
| if user != nil && user.WechatOpenId != "" && user.ID != userId { | if user != nil && user.WechatOpenId != "" && user.ID != userId { | ||||
| return false | return false | ||||
| @@ -95,6 +95,7 @@ func getWechatRestyClient() *resty.Client { | |||||
| func callAccessToken() *AccessTokenResponse { | func callAccessToken() *AccessTokenResponse { | ||||
| client := getWechatRestyClient() | client := getWechatRestyClient() | ||||
| log.Info("start to get wechat access token") | |||||
| var result AccessTokenResponse | var result AccessTokenResponse | ||||
| _, err := client.R(). | _, err := client.R(). | ||||
| SetQueryParam("grant_type", GRANT_TYPE). | SetQueryParam("grant_type", GRANT_TYPE). | ||||
| @@ -106,6 +107,7 @@ func callAccessToken() *AccessTokenResponse { | |||||
| log.Error("get wechat access token failed,e=%v", err) | log.Error("get wechat access token failed,e=%v", err) | ||||
| return nil | return nil | ||||
| } | } | ||||
| log.Info("get wechat access token result=%v", result) | |||||
| return &result | return &result | ||||
| } | } | ||||
| @@ -62,7 +62,7 @@ type CloudbrainStopMsg struct { | |||||
| func (CloudbrainStopMsg) Data(ctx *TemplateContext) *DefaultWechatTemplate { | func (CloudbrainStopMsg) Data(ctx *TemplateContext) *DefaultWechatTemplate { | ||||
| return &DefaultWechatTemplate{ | return &DefaultWechatTemplate{ | ||||
| First: TemplateValue{Value: setting.CloudbrainStoppedTitle}, | |||||
| First: TemplateValue{Value: fmt.Sprintf(setting.CloudbrainStoppedTitle, ctx.Cloudbrain.Status)}, | |||||
| Keyword1: TemplateValue{Value: ctx.Cloudbrain.DisplayJobName}, | Keyword1: TemplateValue{Value: ctx.Cloudbrain.DisplayJobName}, | ||||
| Keyword2: TemplateValue{Value: getJobTypeDisplayName(ctx.Cloudbrain.JobType)}, | Keyword2: TemplateValue{Value: getJobTypeDisplayName(ctx.Cloudbrain.JobType)}, | ||||
| Keyword3: TemplateValue{Value: time.Unix(int64(ctx.Cloudbrain.CreatedUnix), 0).Format("2006-01-02 15:04:05")}, | Keyword3: TemplateValue{Value: time.Unix(int64(ctx.Cloudbrain.CreatedUnix), 0).Format("2006-01-02 15:04:05")}, | ||||
| @@ -20,7 +20,7 @@ import ( | |||||
| const ( | const ( | ||||
| //Command = `pip3 install jupyterlab==2.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --LabApp.token="" --LabApp.allow_origin="self https://cloudbrain.pcl.ac.cn"` | //Command = `pip3 install jupyterlab==2.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --LabApp.token="" --LabApp.allow_origin="self https://cloudbrain.pcl.ac.cn"` | ||||
| //CommandBenchmark = `echo "start benchmark";python /code/test.py;echo "end benchmark"` | //CommandBenchmark = `echo "start benchmark";python /code/test.py;echo "end benchmark"` | ||||
| CommandBenchmark = `echo "start benchmark";cd /benchmark && bash run_bk.sh | tee /model/benchmark-log.txt;echo "end benchmark"` | |||||
| CommandBenchmark = `cd /benchmark && bash run_bk.sh >/model/benchmark-log.txt` | |||||
| CodeMountPath = "/code" | CodeMountPath = "/code" | ||||
| DataSetMountPath = "/dataset" | DataSetMountPath = "/dataset" | ||||
| ModelMountPath = "/model" | ModelMountPath = "/model" | ||||
| @@ -30,8 +30,8 @@ const ( | |||||
| Snn4imagenetMountPath = "/snn4imagenet" | Snn4imagenetMountPath = "/snn4imagenet" | ||||
| BrainScoreMountPath = "/brainscore" | BrainScoreMountPath = "/brainscore" | ||||
| TaskInfoName = "/taskInfo" | TaskInfoName = "/taskInfo" | ||||
| Snn4imagenetCommand = `/opt/conda/bin/python /snn4imagenet/testSNN_script.py --modelname '%s' --modelpath '/dataset' --modeldescription '%s' | tee /model/benchmark-log.txt` | |||||
| BrainScoreCommand = `bash /brainscore/brainscore_test_par4shSrcipt.sh -b '%s' -n '%s' -p '/dataset' -d '%s' | tee /model/benchmark-log.txt` | |||||
| Snn4imagenetCommand = `/opt/conda/bin/python /snn4imagenet/testSNN_script.py --modelname '%s' --modelpath '/dataset' --modeldescription '%s' >/model/benchmark-log.txt` | |||||
| BrainScoreCommand = `bash /brainscore/brainscore_test_par4shSrcipt.sh -b '%s' -n '%s' -p '/dataset' -d '%s' >/model/benchmark-log.txt` | |||||
| SubTaskName = "task1" | SubTaskName = "task1" | ||||
| @@ -61,7 +61,6 @@ type GenerateCloudBrainTaskReq struct { | |||||
| Snn4ImageNetPath string | Snn4ImageNetPath string | ||||
| BrainScorePath string | BrainScorePath string | ||||
| JobType string | JobType string | ||||
| GpuQueue string | |||||
| Description string | Description string | ||||
| BranchName string | BranchName string | ||||
| BootFile string | BootFile string | ||||
| @@ -72,13 +71,13 @@ type GenerateCloudBrainTaskReq struct { | |||||
| DatasetInfos map[string]models.DatasetInfo | DatasetInfos map[string]models.DatasetInfo | ||||
| BenchmarkTypeID int | BenchmarkTypeID int | ||||
| BenchmarkChildTypeID int | BenchmarkChildTypeID int | ||||
| ResourceSpecId int | |||||
| ResultPath string | ResultPath string | ||||
| TrainUrl string | TrainUrl string | ||||
| ModelName string | ModelName string | ||||
| ModelVersion string | ModelVersion string | ||||
| CkptName string | CkptName string | ||||
| LabelName string | LabelName string | ||||
| Spec *models.Specification | |||||
| } | } | ||||
| func GetCloudbrainDebugCommand() string { | func GetCloudbrainDebugCommand() string { | ||||
| @@ -227,50 +226,9 @@ func AdminOrImageCreaterRight(ctx *context.Context) { | |||||
| } | } | ||||
| func GenerateTask(req GenerateCloudBrainTaskReq) error { | func GenerateTask(req GenerateCloudBrainTaskReq) error { | ||||
| var resourceSpec *models.ResourceSpec | |||||
| var versionCount int | var versionCount int | ||||
| if req.JobType == string(models.JobTypeTrain) { | if req.JobType == string(models.JobTypeTrain) { | ||||
| versionCount = 1 | versionCount = 1 | ||||
| if TrainResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.TrainResourceSpecs), &TrainResourceSpecs) | |||||
| } | |||||
| for _, spec := range TrainResourceSpecs.ResourceSpec { | |||||
| if req.ResourceSpecId == spec.Id { | |||||
| resourceSpec = spec | |||||
| break | |||||
| } | |||||
| } | |||||
| } else if req.JobType == string(models.JobTypeInference) { | |||||
| if InferenceResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.InferenceResourceSpecs), &InferenceResourceSpecs) | |||||
| } | |||||
| for _, spec := range InferenceResourceSpecs.ResourceSpec { | |||||
| if req.ResourceSpecId == spec.Id { | |||||
| resourceSpec = spec | |||||
| break | |||||
| } | |||||
| } | |||||
| } else { | |||||
| if ResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) | |||||
| } | |||||
| for _, spec := range ResourceSpecs.ResourceSpec { | |||||
| if req.ResourceSpecId == spec.Id { | |||||
| resourceSpec = spec | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| //If no spec was matched, try to get one from the dedicated resource pool | |||||
| if resourceSpec == nil && SpecialPools != nil { | |||||
| resourceSpec = geMatchResourceSpec(req.JobType, req.GpuQueue, req.ResourceSpecId) | |||||
| } | |||||
| if resourceSpec == nil { | |||||
| log.Error("no such resourceSpecId(%d)", req.ResourceSpecId, req.Ctx.Data["MsgID"]) | |||||
| return errors.New("no such resourceSpec") | |||||
| } | } | ||||
| volumes := []models.Volume{ | volumes := []models.Volume{ | ||||
| @@ -342,7 +300,7 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| jobResult, err := CreateJob(req.JobName, models.CreateJobParams{ | jobResult, err := CreateJob(req.JobName, models.CreateJobParams{ | ||||
| JobName: req.JobName, | JobName: req.JobName, | ||||
| RetryCount: 1, | RetryCount: 1, | ||||
| GpuType: req.GpuQueue, | |||||
| GpuType: req.Spec.QueueCode, | |||||
| Image: req.Image, | Image: req.Image, | ||||
| TaskRoles: []models.TaskRole{ | TaskRoles: []models.TaskRole{ | ||||
| { | { | ||||
| @@ -350,10 +308,10 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| TaskNumber: 1, | TaskNumber: 1, | ||||
| MinSucceededTaskCount: 1, | MinSucceededTaskCount: 1, | ||||
| MinFailedTaskCount: 1, | MinFailedTaskCount: 1, | ||||
| CPUNumber: resourceSpec.CpuNum, | |||||
| GPUNumber: resourceSpec.GpuNum, | |||||
| MemoryMB: resourceSpec.MemMiB, | |||||
| ShmMB: resourceSpec.ShareMemMiB, | |||||
| CPUNumber: req.Spec.CpuCores, | |||||
| GPUNumber: req.Spec.AccCardsNum, | |||||
| MemoryMB: int(req.Spec.MemGiB * 1024), | |||||
| ShmMB: int(req.Spec.ShareMemGiB * 1024), | |||||
| Command: req.Command, | Command: req.Command, | ||||
| NeedIBDevice: false, | NeedIBDevice: false, | ||||
| IsMainRole: false, | IsMainRole: false, | ||||
| @@ -384,8 +342,7 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| Type: models.TypeCloudBrainOne, | Type: models.TypeCloudBrainOne, | ||||
| Uuid: req.Uuids, | Uuid: req.Uuids, | ||||
| Image: req.Image, | Image: req.Image, | ||||
| GpuQueue: req.GpuQueue, | |||||
| ResourceSpecId: req.ResourceSpecId, | |||||
| GpuQueue: req.Spec.QueueCode, | |||||
| ComputeResource: models.GPUResource, | ComputeResource: models.GPUResource, | ||||
| BenchmarkTypeID: req.BenchmarkTypeID, | BenchmarkTypeID: req.BenchmarkTypeID, | ||||
| BenchmarkChildTypeID: req.BenchmarkChildTypeID, | BenchmarkChildTypeID: req.BenchmarkChildTypeID, | ||||
| @@ -405,6 +362,7 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| CommitID: req.CommitID, | CommitID: req.CommitID, | ||||
| Spec: req.Spec, | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -416,6 +374,7 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| log.Error("GetCloudbrainByJobID failed: %v", err.Error()) | log.Error("GetCloudbrainByJobID failed: %v", err.Error()) | ||||
| return err | return err | ||||
| } | } | ||||
| stringId := strconv.FormatInt(task.ID, 10) | stringId := strconv.FormatInt(task.ID, 10) | ||||
| if IsBenchmarkJob(req.JobType) { | if IsBenchmarkJob(req.JobType) { | ||||
| @@ -447,25 +406,7 @@ func GetWaitingCloudbrainCount(cloudbrainType int, computeResource string, jobTy | |||||
| func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) error { | func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) error { | ||||
| jobName := task.JobName | jobName := task.JobName | ||||
| var resourceSpec *models.ResourceSpec | |||||
| if ResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) | |||||
| } | |||||
| for _, spec := range ResourceSpecs.ResourceSpec { | |||||
| if task.ResourceSpecId == spec.Id { | |||||
| resourceSpec = spec | |||||
| } | |||||
| } | |||||
| //If no matching spec info is found, try to get one from the dedicated resource pool | |||||
| if resourceSpec == nil && SpecialPools != nil { | |||||
| resourceSpec = geMatchResourceSpec(task.JobType, task.GpuQueue, task.ResourceSpecId) | |||||
| } | |||||
| if resourceSpec == nil { | |||||
| log.Error("no such resourceSpecId(%d)", task.ResourceSpecId, ctx.Data["MsgID"]) | |||||
| return errors.New("no such resourceSpec") | |||||
| } | |||||
| spec := task.Spec | |||||
| var datasetInfos map[string]models.DatasetInfo | var datasetInfos map[string]models.DatasetInfo | ||||
| if task.Uuid != "" { | if task.Uuid != "" { | ||||
| var err error | var err error | ||||
| @@ -547,10 +488,10 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e | |||||
| TaskNumber: 1, | TaskNumber: 1, | ||||
| MinSucceededTaskCount: 1, | MinSucceededTaskCount: 1, | ||||
| MinFailedTaskCount: 1, | MinFailedTaskCount: 1, | ||||
| CPUNumber: resourceSpec.CpuNum, | |||||
| GPUNumber: resourceSpec.GpuNum, | |||||
| MemoryMB: resourceSpec.MemMiB, | |||||
| ShmMB: resourceSpec.ShareMemMiB, | |||||
| CPUNumber: spec.CpuCores, | |||||
| GPUNumber: spec.AccCardsNum, | |||||
| MemoryMB: int(spec.MemGiB * 1024), | |||||
| ShmMB: int(spec.ShareMemGiB * 1024), | |||||
| Command: GetCloudbrainDebugCommand(), //Command, | Command: GetCloudbrainDebugCommand(), //Command, | ||||
| NeedIBDevice: false, | NeedIBDevice: false, | ||||
| IsMainRole: false, | IsMainRole: false, | ||||
| @@ -588,6 +529,7 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e | |||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| BranchName: task.BranchName, | BranchName: task.BranchName, | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = models.RestartCloudbrain(task, newTask) | err = models.RestartCloudbrain(task, newTask) | ||||
| @@ -1,6 +1,7 @@ | |||||
| package cloudbrain | package cloudbrain | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/notification" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -25,10 +26,10 @@ var ( | |||||
| const ( | const ( | ||||
| JobHasBeenStopped = "S410" | JobHasBeenStopped = "S410" | ||||
| errInvalidToken = "S401" | |||||
| Public = "public" | Public = "public" | ||||
| Custom = "custom" | Custom = "custom" | ||||
| LogPageSize = 500 | LogPageSize = 500 | ||||
| errInvalidToken = "S401" | |||||
| LogPageTokenExpired = "5m" | LogPageTokenExpired = "5m" | ||||
| pageSize = 15 | pageSize = 15 | ||||
| QueuesDetailUrl = "/rest-server/api/v2/queuesdetail" | QueuesDetailUrl = "/rest-server/api/v2/queuesdetail" | ||||
| @@ -235,7 +236,7 @@ func getQueryString(page int, size int, name string) string { | |||||
| return fmt.Sprintf("pageIndex=%d&pageSize=%d&name=%s", page, size, name) | return fmt.Sprintf("pageIndex=%d&pageSize=%d&name=%s", page, size, name) | ||||
| } | } | ||||
| func CommitImage(jobID string, params models.CommitImageParams) error { | |||||
| func CommitImage(jobID string, params models.CommitImageParams, doer *models.User) error { | |||||
| imageTag := strings.TrimSpace(params.ImageTag) | imageTag := strings.TrimSpace(params.ImageTag) | ||||
| dbImage, err := models.GetImageByTag(imageTag) | dbImage, err := models.GetImageByTag(imageTag) | ||||
| @@ -340,11 +341,12 @@ sendjob: | |||||
| }) | }) | ||||
| if err == nil { | if err == nil { | ||||
| go updateImageStatus(image, isSetCreatedUnix, createTime) | go updateImageStatus(image, isSetCreatedUnix, createTime) | ||||
| notification.NotifyCreateImage(doer, image) | |||||
| } | } | ||||
| return err | return err | ||||
| } | } | ||||
| func CommitAdminImage(params models.CommitImageParams) error { | |||||
| func CommitAdminImage(params models.CommitImageParams, doer *models.User) error { | |||||
| imageTag := strings.TrimSpace(params.ImageTag) | imageTag := strings.TrimSpace(params.ImageTag) | ||||
| exist, err := models.IsImageExist(imageTag) | exist, err := models.IsImageExist(imageTag) | ||||
| @@ -381,6 +383,9 @@ func CommitAdminImage(params models.CommitImageParams) error { | |||||
| } | } | ||||
| return nil | return nil | ||||
| }) | }) | ||||
| if err == nil { | |||||
| notification.NotifyCreateImage(doer, image) | |||||
| } | |||||
| return err | return err | ||||
| } | } | ||||
| @@ -0,0 +1,21 @@ | |||||
| package context | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "gitea.com/macaron/macaron" | |||||
| ) | |||||
| // PointAccount returns a macaron to get request user's point account | |||||
| func PointAccount() macaron.Handler { | |||||
| return func(ctx *Context) { | |||||
| a, err := account.GetAccount(ctx.User.ID) | |||||
| if err != nil { | |||||
| ctx.ServerError("GetPointAccount", err) | |||||
| return | |||||
| } | |||||
| ctx.Data["PointAccount"] = a | |||||
| ctx.Data["CloudBrainPaySwitch"] = setting.CloudBrainPaySwitch | |||||
| ctx.Next() | |||||
| } | |||||
| } | |||||
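
For orientation, this new PointAccount middleware is meant to be chained onto routes whose pages need the requesting user's point balance. A minimal wiring sketch, assuming a hypothetical /cloudbrain route group and handler name (the actual route registration is not part of this hunk):

```go
package routes

import (
	"code.gitea.io/gitea/modules/context"
	"gitea.com/macaron/macaron"
)

// registerCloudbrainRoutes is a hypothetical helper used only for illustration;
// the concrete registration in the routers package may differ.
func registerCloudbrainRoutes(m *macaron.Macaron, createPage macaron.Handler) {
	m.Group("/cloudbrain", func() {
		// Every handler in this group now sees ctx.Data["PointAccount"] and
		// ctx.Data["CloudBrainPaySwitch"] populated by the middleware.
		m.Get("/create", createPage)
	}, context.PointAccount())
}
```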
| @@ -5,6 +5,7 @@ | |||||
| package cron | package cron | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/services/reward" | |||||
| "code.gitea.io/gitea/services/cloudbrain/resource" | "code.gitea.io/gitea/services/cloudbrain/resource" | ||||
| "code.gitea.io/gitea/modules/modelarts" | "code.gitea.io/gitea/modules/modelarts" | ||||
| "context" | "context" | ||||
| @@ -209,6 +210,28 @@ func registerSyncCloudbrainStatus() { | |||||
| }) | }) | ||||
| } | } | ||||
| func registerRewardPeriodTask() { | |||||
| RegisterTaskFatal("reward_period_task", &BaseConfig{ | |||||
| Enabled: true, | |||||
| RunAtStart: true, | |||||
| Schedule: "@every 1m", | |||||
| }, func(ctx context.Context, _ *models.User, _ Config) error { | |||||
| reward.StartRewardTask() | |||||
| return nil | |||||
| }) | |||||
| } | |||||
| func registerCloudbrainPointDeductTask() { | |||||
| RegisterTaskFatal("cloudbrain_point_deduct_task", &BaseConfig{ | |||||
| Enabled: true, | |||||
| RunAtStart: true, | |||||
| Schedule: "@every 1m", | |||||
| }, func(ctx context.Context, _ *models.User, _ Config) error { | |||||
| reward.StartCloudbrainPointDeductTask() | |||||
| return nil | |||||
| }) | |||||
| } | |||||
| func registerSyncResourceSpecs() { | func registerSyncResourceSpecs() { | ||||
| RegisterTaskFatal("sync_grampus_specs", &BaseConfig{ | RegisterTaskFatal("sync_grampus_specs", &BaseConfig{ | ||||
| Enabled: true, | Enabled: true, | ||||
| @@ -253,4 +276,7 @@ func initBasicTasks() { | |||||
| registerHandleOrgStatistic() | registerHandleOrgStatistic() | ||||
| registerSyncResourceSpecs() | registerSyncResourceSpecs() | ||||
| registerSyncModelArtsTempJobs() | registerSyncModelArtsTempJobs() | ||||
| //registerRewardPeriodTask() | |||||
| registerCloudbrainPointDeductTask() | |||||
| } | } | ||||
| @@ -5,6 +5,7 @@ | |||||
| package eventsource | package eventsource | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/services/reward" | |||||
| "context" | "context" | ||||
| "time" | "time" | ||||
| @@ -24,8 +25,28 @@ func (m *Manager) Init() { | |||||
| func (m *Manager) Run(ctx context.Context) { | func (m *Manager) Run(ctx context.Context) { | ||||
| then := timeutil.TimeStampNow().Add(-2) | then := timeutil.TimeStampNow().Add(-2) | ||||
| timer := time.NewTicker(setting.UI.Notification.EventSourceUpdateTime) | timer := time.NewTicker(setting.UI.Notification.EventSourceUpdateTime) | ||||
| rewardThen := then | |||||
| rewardTimer := time.NewTicker(setting.UI.Notification.RewardNotifyUpdateTime) | |||||
| loop: | loop: | ||||
| for { | for { | ||||
| select { | |||||
| case <-rewardTimer.C: | |||||
| log.Debug("rewardTimer run") | |||||
| now := timeutil.TimeStampNow().Add(-2) | |||||
| list := reward.GetRewardOperation(rewardThen, now) | |||||
| if list != nil { | |||||
| log.Debug("GetRewardOperation list=%v", list) | |||||
| for _, l := range list { | |||||
| m.SendMessage(l.UserId, &Event{ | |||||
| Name: "reward-operation", | |||||
| Data: l.Msg, | |||||
| }) | |||||
| } | |||||
| } | |||||
| rewardThen = now | |||||
| } | |||||
| select { | select { | ||||
| case <-ctx.Done(): | case <-ctx.Done(): | ||||
| timer.Stop() | timer.Stop() | ||||
| @@ -44,6 +65,7 @@ loop: | |||||
| }) | }) | ||||
| } | } | ||||
| then = now | then = now | ||||
| default: | |||||
| } | } | ||||
| } | } | ||||
| m.UnregisterAll() | m.UnregisterAll() | ||||
| @@ -30,18 +30,17 @@ const ( | |||||
| var ( | var ( | ||||
| poolInfos *models.PoolInfos | poolInfos *models.PoolInfos | ||||
| FlavorInfos *models.FlavorInfos | |||||
| ImageInfos *models.ImageInfosModelArts | |||||
| FlavorInfos *setting.StFlavorInfos | |||||
| ImageInfos *setting.StImageInfosModelArts | |||||
| SpecialPools *models.SpecialPools | SpecialPools *models.SpecialPools | ||||
| ) | ) | ||||
| type GenerateTrainJobReq struct { | type GenerateTrainJobReq struct { | ||||
| JobName string | |||||
| Command string | |||||
| ResourceSpecId string | |||||
| ImageUrl string //choose either image_url or image_id; if both are set, image_url takes priority | |||||
| ImageId string | |||||
| JobName string | |||||
| Command string | |||||
| ImageUrl string //choose either image_url or image_id; if both are set, image_url takes priority | |||||
| ImageId string | |||||
| DisplayJobName string | DisplayJobName string | ||||
| Uuid string | Uuid string | ||||
| @@ -58,7 +57,6 @@ type GenerateTrainJobReq struct { | |||||
| BranchName string | BranchName string | ||||
| PreVersionId int64 | PreVersionId int64 | ||||
| PreVersionName string | PreVersionName string | ||||
| FlavorName string | |||||
| VersionCount int | VersionCount int | ||||
| EngineName string | EngineName string | ||||
| TotalVersionCount int | TotalVersionCount int | ||||
| @@ -66,6 +64,7 @@ type GenerateTrainJobReq struct { | |||||
| ProcessType string | ProcessType string | ||||
| DatasetName string | DatasetName string | ||||
| Params string | Params string | ||||
| Spec *models.Specification | |||||
| } | } | ||||
| func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error) { | func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error) { | ||||
| @@ -79,7 +78,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||||
| { | { | ||||
| Name: req.JobName, | Name: req.JobName, | ||||
| Command: req.Command, | Command: req.Command, | ||||
| ResourceSpecId: req.ResourceSpecId, | |||||
| ResourceSpecId: req.Spec.SourceSpecId, | |||||
| ImageId: req.ImageId, | ImageId: req.ImageId, | ||||
| ImageUrl: req.ImageUrl, | ImageUrl: req.ImageUrl, | ||||
| CenterID: centerID, | CenterID: centerID, | ||||
| @@ -114,15 +113,14 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||||
| Parameters: req.Params, | Parameters: req.Params, | ||||
| BootFile: req.BootFile, | BootFile: req.BootFile, | ||||
| DataUrl: req.DataUrl, | DataUrl: req.DataUrl, | ||||
| FlavorCode: req.ResourceSpecId, | |||||
| Description: req.Description, | Description: req.Description, | ||||
| WorkServerNumber: req.WorkServerNumber, | WorkServerNumber: req.WorkServerNumber, | ||||
| FlavorName: req.FlavorName, | |||||
| EngineName: req.EngineName, | EngineName: req.EngineName, | ||||
| VersionCount: req.VersionCount, | VersionCount: req.VersionCount, | ||||
| TotalVersionCount: req.TotalVersionCount, | TotalVersionCount: req.TotalVersionCount, | ||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| Spec: req.Spec, | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -1,6 +1,7 @@ | |||||
| package modelarts | package modelarts | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/modelarts_cd" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -68,8 +69,6 @@ const ( | |||||
| var ( | var ( | ||||
| poolInfos *models.PoolInfos | poolInfos *models.PoolInfos | ||||
| FlavorInfos *models.FlavorInfos | |||||
| ImageInfos *models.ImageInfosModelArts | |||||
| TrainFlavorInfos *Flavor | TrainFlavorInfos *Flavor | ||||
| SpecialPools *models.SpecialPools | SpecialPools *models.SpecialPools | ||||
| MultiNodeConfig *MultiNodes | MultiNodeConfig *MultiNodes | ||||
| @@ -85,7 +84,6 @@ type GenerateTrainJobReq struct { | |||||
| BootFileUrl string | BootFileUrl string | ||||
| DataUrl string | DataUrl string | ||||
| TrainUrl string | TrainUrl string | ||||
| FlavorCode string | |||||
| LogUrl string | LogUrl string | ||||
| PoolID string | PoolID string | ||||
| WorkServerNumber int | WorkServerNumber int | ||||
| @@ -97,6 +95,7 @@ type GenerateTrainJobReq struct { | |||||
| BranchName string | BranchName string | ||||
| PreVersionId int64 | PreVersionId int64 | ||||
| PreVersionName string | PreVersionName string | ||||
| FlavorCode string | |||||
| FlavorName string | FlavorName string | ||||
| VersionCount int | VersionCount int | ||||
| EngineName string | EngineName string | ||||
| @@ -104,6 +103,7 @@ type GenerateTrainJobReq struct { | |||||
| UserImageUrl string | UserImageUrl string | ||||
| UserCommand string | UserCommand string | ||||
| DatasetName string | DatasetName string | ||||
| Spec *models.Specification | |||||
| } | } | ||||
| type GenerateInferenceJobReq struct { | type GenerateInferenceJobReq struct { | ||||
| @@ -116,7 +116,6 @@ type GenerateInferenceJobReq struct { | |||||
| BootFileUrl string | BootFileUrl string | ||||
| DataUrl string | DataUrl string | ||||
| TrainUrl string | TrainUrl string | ||||
| FlavorCode string | |||||
| LogUrl string | LogUrl string | ||||
| PoolID string | PoolID string | ||||
| WorkServerNumber int | WorkServerNumber int | ||||
| @@ -135,6 +134,7 @@ type GenerateInferenceJobReq struct { | |||||
| ModelVersion string | ModelVersion string | ||||
| CkptName string | CkptName string | ||||
| ResultUrl string | ResultUrl string | ||||
| Spec *models.Specification | |||||
| DatasetName string | DatasetName string | ||||
| } | } | ||||
| @@ -148,8 +148,9 @@ type VersionInfo struct { | |||||
| type Flavor struct { | type Flavor struct { | ||||
| Info []struct { | Info []struct { | ||||
| Code string `json:"code"` | |||||
| Value string `json:"value"` | |||||
| Code string `json:"code"` | |||||
| Value string `json:"value"` | |||||
| UnitPrice int64 `json:"unitPrice"` | |||||
| } `json:"flavor"` | } `json:"flavor"` | ||||
| } | } | ||||
| @@ -266,7 +267,7 @@ func GenerateTask(ctx *context.Context, jobName, uuid, description, flavor strin | |||||
| return nil | return nil | ||||
| } | } | ||||
| func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, description, flavor, imageId string) error { | |||||
| func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, description, imageId string, spec *models.Specification) error { | |||||
| if poolInfos == nil { | if poolInfos == nil { | ||||
| json.Unmarshal([]byte(setting.PoolInfos), &poolInfos) | json.Unmarshal([]byte(setting.PoolInfos), &poolInfos) | ||||
| } | } | ||||
| @@ -280,7 +281,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc | |||||
| jobResult, err := createNotebook2(models.CreateNotebook2Params{ | jobResult, err := createNotebook2(models.CreateNotebook2Params{ | ||||
| JobName: jobName, | JobName: jobName, | ||||
| Description: description, | Description: description, | ||||
| Flavor: flavor, | |||||
| Flavor: spec.SourceSpecId, | |||||
| Duration: autoStopDurationMs, | Duration: autoStopDurationMs, | ||||
| ImageID: imageId, | ImageID: imageId, | ||||
| PoolID: poolInfos.PoolInfo[0].PoolId, | PoolID: poolInfos.PoolInfo[0].PoolId, | ||||
| @@ -317,7 +318,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc | |||||
| RepoID: ctx.Repo.Repository.ID, | RepoID: ctx.Repo.Repository.ID, | ||||
| JobID: jobResult.ID, | JobID: jobResult.ID, | ||||
| JobName: jobName, | JobName: jobName, | ||||
| FlavorCode: flavor, | |||||
| FlavorCode: spec.SourceSpecId, | |||||
| DisplayJobName: displayJobName, | DisplayJobName: displayJobName, | ||||
| JobType: string(models.JobTypeDebug), | JobType: string(models.JobTypeDebug), | ||||
| Type: models.TypeCloudBrainTwo, | Type: models.TypeCloudBrainTwo, | ||||
| @@ -327,6 +328,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc | |||||
| Description: description, | Description: description, | ||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = models.CreateCloudbrain(task) | err = models.CreateCloudbrain(task) | ||||
| @@ -357,7 +359,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||||
| PoolID: req.PoolID, | PoolID: req.PoolID, | ||||
| CreateVersion: true, | CreateVersion: true, | ||||
| Flavor: models.Flavor{ | Flavor: models.Flavor{ | ||||
| Code: req.FlavorCode, | |||||
| Code: req.Spec.SourceSpecId, | |||||
| }, | }, | ||||
| Parameter: req.Parameters, | Parameter: req.Parameters, | ||||
| UserImageUrl: req.UserImageUrl, | UserImageUrl: req.UserImageUrl, | ||||
| @@ -379,7 +381,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||||
| PoolID: req.PoolID, | PoolID: req.PoolID, | ||||
| CreateVersion: true, | CreateVersion: true, | ||||
| Flavor: models.Flavor{ | Flavor: models.Flavor{ | ||||
| Code: req.FlavorCode, | |||||
| Code: req.Spec.SourceSpecId, | |||||
| }, | }, | ||||
| Parameter: req.Parameters, | Parameter: req.Parameters, | ||||
| }, | }, | ||||
| @@ -428,7 +430,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||||
| BootFile: req.BootFile, | BootFile: req.BootFile, | ||||
| DataUrl: req.DataUrl, | DataUrl: req.DataUrl, | ||||
| LogUrl: req.LogUrl, | LogUrl: req.LogUrl, | ||||
| FlavorCode: req.FlavorCode, | |||||
| FlavorCode: req.Spec.SourceSpecId, | |||||
| Description: req.Description, | Description: req.Description, | ||||
| WorkServerNumber: req.WorkServerNumber, | WorkServerNumber: req.WorkServerNumber, | ||||
| FlavorName: req.FlavorName, | FlavorName: req.FlavorName, | ||||
| @@ -437,6 +439,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||||
| TotalVersionCount: req.TotalVersionCount, | TotalVersionCount: req.TotalVersionCount, | ||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| Spec: req.Spec, | |||||
| }) | }) | ||||
| if createErr != nil { | if createErr != nil { | ||||
| @@ -488,7 +491,7 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job | |||||
| LogUrl: req.LogUrl, | LogUrl: req.LogUrl, | ||||
| PoolID: req.PoolID, | PoolID: req.PoolID, | ||||
| Flavor: models.Flavor{ | Flavor: models.Flavor{ | ||||
| Code: req.FlavorCode, | |||||
| Code: req.Spec.SourceSpecId, | |||||
| }, | }, | ||||
| Parameter: req.Parameters, | Parameter: req.Parameters, | ||||
| PreVersionId: req.PreVersionId, | PreVersionId: req.PreVersionId, | ||||
| @@ -509,7 +512,7 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job | |||||
| LogUrl: req.LogUrl, | LogUrl: req.LogUrl, | ||||
| PoolID: req.PoolID, | PoolID: req.PoolID, | ||||
| Flavor: models.Flavor{ | Flavor: models.Flavor{ | ||||
| Code: req.FlavorCode, | |||||
| Code: req.Spec.SourceSpecId, | |||||
| }, | }, | ||||
| Parameter: req.Parameters, | Parameter: req.Parameters, | ||||
| PreVersionId: req.PreVersionId, | PreVersionId: req.PreVersionId, | ||||
| @@ -576,7 +579,7 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job | |||||
| DataUrl: req.DataUrl, | DataUrl: req.DataUrl, | ||||
| LogUrl: req.LogUrl, | LogUrl: req.LogUrl, | ||||
| PreVersionId: req.PreVersionId, | PreVersionId: req.PreVersionId, | ||||
| FlavorCode: req.FlavorCode, | |||||
| FlavorCode: req.Spec.SourceSpecId, | |||||
| Description: req.Description, | Description: req.Description, | ||||
| WorkServerNumber: req.WorkServerNumber, | WorkServerNumber: req.WorkServerNumber, | ||||
| FlavorName: req.FlavorName, | FlavorName: req.FlavorName, | ||||
| @@ -585,6 +588,7 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job | |||||
| VersionCount: VersionListCount + 1, | VersionCount: VersionListCount + 1, | ||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| Spec: req.Spec, | |||||
| }) | }) | ||||
| if createErr != nil { | if createErr != nil { | ||||
| log.Error("CreateCloudbrain(%s) failed:%v", req.JobName, createErr.Error()) | log.Error("CreateCloudbrain(%s) failed:%v", req.JobName, createErr.Error()) | ||||
| @@ -675,7 +679,7 @@ func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (e | |||||
| PoolID: req.PoolID, | PoolID: req.PoolID, | ||||
| CreateVersion: true, | CreateVersion: true, | ||||
| Flavor: models.Flavor{ | Flavor: models.Flavor{ | ||||
| Code: req.FlavorCode, | |||||
| Code: req.Spec.SourceSpecId, | |||||
| }, | }, | ||||
| Parameter: req.Parameters, | Parameter: req.Parameters, | ||||
| }, | }, | ||||
| @@ -727,7 +731,7 @@ func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (e | |||||
| BootFile: req.BootFile, | BootFile: req.BootFile, | ||||
| DataUrl: req.DataUrl, | DataUrl: req.DataUrl, | ||||
| LogUrl: req.LogUrl, | LogUrl: req.LogUrl, | ||||
| FlavorCode: req.FlavorCode, | |||||
| FlavorCode: req.Spec.SourceSpecId, | |||||
| Description: req.Description, | Description: req.Description, | ||||
| WorkServerNumber: req.WorkServerNumber, | WorkServerNumber: req.WorkServerNumber, | ||||
| FlavorName: req.FlavorName, | FlavorName: req.FlavorName, | ||||
| @@ -743,6 +747,7 @@ func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (e | |||||
| ResultUrl: req.ResultUrl, | ResultUrl: req.ResultUrl, | ||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| Spec: req.Spec, | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -757,11 +762,7 @@ func GetNotebookImageName(imageId string) (string, error) { | |||||
| var validImage = false | var validImage = false | ||||
| var imageName = "" | var imageName = "" | ||||
| if ImageInfos == nil { | |||||
| json.Unmarshal([]byte(setting.ImageInfos), &ImageInfos) | |||||
| } | |||||
| for _, imageInfo := range ImageInfos.ImageInfo { | |||||
| for _, imageInfo := range setting.StImageInfos.ImageInfo { | |||||
| if imageInfo.Id == imageId { | if imageInfo.Id == imageId { | ||||
| validImage = true | validImage = true | ||||
| imageName = imageInfo.Value | imageName = imageInfo.Value | ||||
| @@ -825,8 +826,13 @@ func HandleTrainJobInfo(task *models.Cloudbrain) error { | |||||
| } | } | ||||
| func HandleNotebookInfo(task *models.Cloudbrain) error { | func HandleNotebookInfo(task *models.Cloudbrain) error { | ||||
| result, err := GetNotebook2(task.JobID) | |||||
| var result *models.GetNotebook2Result | |||||
| var err error | |||||
| if task.Type == models.TypeCloudBrainTwo { | |||||
| result, err = GetNotebook2(task.JobID) | |||||
| } else if task.Type == models.TypeCDCenter { | |||||
| result, err = modelarts_cd.GetNotebook(task.JobID) | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("GetNotebook2(%s) failed:%v", task.DisplayJobName, err) | log.Error("GetNotebook2(%s) failed:%v", task.DisplayJobName, err) | ||||
| return err | return err | ||||
| @@ -0,0 +1,215 @@ | |||||
| package modelarts_cd | |||||
| import ( | |||||
| "errors" | |||||
| "strconv" | |||||
| "strings" | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/context" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/notification" | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| ) | |||||
| const ( | |||||
| //notebook | |||||
| storageTypeOBS = "obs" | |||||
| autoStopDuration = 4 * 60 * 60 | |||||
| autoStopDurationMs = 4 * 60 * 60 * 1000 | |||||
| MORDELART_USER_IMAGE_ENGINE_ID = -1 | |||||
| DataSetMountPath = "/home/ma-user/work" | |||||
| NotebookEnv = "Python3" | |||||
| NotebookType = "Ascend" | |||||
| FlavorInfo = "Ascend: 1*Ascend 910 CPU: 24 核 96GiB (modelarts.kat1.xlarge)" | |||||
| //train-job | |||||
| CodePath = "/code/" | |||||
| OutputPath = "/output/" | |||||
| ResultPath = "/result/" | |||||
| LogPath = "/log/" | |||||
| JobPath = "/job/" | |||||
| OrderDesc = "desc" //向下查询 | |||||
| OrderAsc = "asc" //向上查询 | |||||
| Lines = 500 | |||||
| TrainUrl = "train_url" | |||||
| DataUrl = "data_url" | |||||
| MultiDataUrl = "multi_data_url" | |||||
| ResultUrl = "result_url" | |||||
| CkptUrl = "ckpt_url" | |||||
| DeviceTarget = "device_target" | |||||
| Ascend = "Ascend" | |||||
| PerPage = 10 | |||||
| IsLatestVersion = "1" | |||||
| NotLatestVersion = "0" | |||||
| VersionCountOne = 1 | |||||
| SortByCreateTime = "create_time" | |||||
| ConfigTypeCustom = "custom" | |||||
| TotalVersionCount = 1 | |||||
| ) | |||||
| var () | |||||
| type VersionInfo struct { | |||||
| Version []struct { | |||||
| ID int `json:"id"` | |||||
| Value string `json:"value"` | |||||
| Url string `json:"url"` | |||||
| } `json:"version"` | |||||
| } | |||||
| type Flavor struct { | |||||
| Info []struct { | |||||
| Code string `json:"code"` | |||||
| Value string `json:"value"` | |||||
| } `json:"flavor"` | |||||
| } | |||||
| type Engine struct { | |||||
| Info []struct { | |||||
| ID int `json:"id"` | |||||
| Value string `json:"value"` | |||||
| } `json:"engine"` | |||||
| } | |||||
| type ResourcePool struct { | |||||
| Info []struct { | |||||
| ID string `json:"id"` | |||||
| Value string `json:"value"` | |||||
| } `json:"resource_pool"` | |||||
| } | |||||
| type Parameters struct { | |||||
| Parameter []struct { | |||||
| Label string `json:"label"` | |||||
| Value string `json:"value"` | |||||
| } `json:"parameter"` | |||||
| } | |||||
| func GenerateNotebook(ctx *context.Context, displayJobName, jobName, uuid, description, imageId string, spec *models.Specification) error { | |||||
| imageName, err := GetNotebookImageName(imageId) | |||||
| if err != nil { | |||||
| log.Error("GetNotebookImageName failed: %v", err.Error()) | |||||
| return err | |||||
| } | |||||
| createTime := timeutil.TimeStampNow() | |||||
| jobResult, err := createNotebook(models.CreateNotebookWithoutPoolParams{ | |||||
| JobName: jobName, | |||||
| Description: description, | |||||
| Flavor: spec.SourceSpecId, | |||||
| Duration: autoStopDurationMs, | |||||
| ImageID: imageId, | |||||
| Feature: models.NotebookFeature, | |||||
| Volume: models.VolumeReq{ | |||||
| Capacity: setting.Capacity, | |||||
| Category: models.EVSCategory, | |||||
| Ownership: models.ManagedOwnership, | |||||
| }, | |||||
| WorkspaceID: "0", | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("createNotebook failed: %v", err.Error()) | |||||
| if strings.HasPrefix(err.Error(), UnknownErrorPrefix) { | |||||
| log.Info("(%s)unknown error, set temp status", displayJobName) | |||||
| errTemp := models.InsertCloudbrainTemp(&models.CloudbrainTemp{ | |||||
| JobID: models.TempJobId, | |||||
| VersionID: models.TempVersionId, | |||||
| Status: models.TempJobStatus, | |||||
| Type: models.TypeCDCenter, | |||||
| JobName: jobName, | |||||
| JobType: string(models.JobTypeDebug), | |||||
| }) | |||||
| if errTemp != nil { | |||||
| log.Error("InsertCloudbrainTemp failed: %v", errTemp.Error()) | |||||
| return errTemp | |||||
| } | |||||
| } | |||||
| return err | |||||
| } | |||||
| task := &models.Cloudbrain{ | |||||
| Status: jobResult.Status, | |||||
| UserID: ctx.User.ID, | |||||
| RepoID: ctx.Repo.Repository.ID, | |||||
| JobID: jobResult.ID, | |||||
| JobName: jobName, | |||||
| FlavorCode: spec.SourceSpecId, | |||||
| DisplayJobName: displayJobName, | |||||
| JobType: string(models.JobTypeDebug), | |||||
| Type: models.TypeCDCenter, | |||||
| Uuid: uuid, | |||||
| ComputeResource: models.NPUResource, | |||||
| Image: imageName, | |||||
| Description: description, | |||||
| CreatedUnix: createTime, | |||||
| UpdatedUnix: createTime, | |||||
| Spec: spec, | |||||
| } | |||||
| err = models.CreateCloudbrain(task) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| stringId := strconv.FormatInt(task.ID, 10) | |||||
| notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, stringId, displayJobName, models.ActionCreateDebugNPUTask) | |||||
| return nil | |||||
| } | |||||
| func GetNotebookImageName(imageId string) (string, error) { | |||||
| var validImage = false | |||||
| var imageName = "" | |||||
| for _, imageInfo := range setting.StImageInfos.ImageInfo { | |||||
| if imageInfo.Id == imageId { | |||||
| validImage = true | |||||
| imageName = imageInfo.Value | |||||
| } | |||||
| } | |||||
| if !validImage { | |||||
| log.Error("the image id(%s) is invalid", imageId) | |||||
| return imageName, errors.New("the image id is invalid") | |||||
| } | |||||
| return imageName, nil | |||||
| } | |||||
| /* | |||||
| func HandleNotebookInfo(task *models.Cloudbrain) error { | |||||
| result, err := GetNotebook(task.JobID) | |||||
| if err != nil { | |||||
| log.Error("GetNotebook2(%s) failed:%v", task.DisplayJobName, err) | |||||
| return err | |||||
| } | |||||
| if result != nil { | |||||
| oldStatus := task.Status | |||||
| task.Status = result.Status | |||||
| if task.StartTime == 0 && result.Lease.UpdateTime > 0 { | |||||
| task.StartTime = timeutil.TimeStamp(result.Lease.UpdateTime / 1000) | |||||
| } | |||||
| if task.EndTime == 0 && models.IsModelArtsDebugJobTerminal(task.Status) { | |||||
| task.EndTime = timeutil.TimeStampNow() | |||||
| } | |||||
| task.CorrectCreateUnix() | |||||
| task.ComputeAndSetDuration() | |||||
| if oldStatus != task.Status { | |||||
| notification.NotifyChangeCloudbrainStatus(task, oldStatus) | |||||
| } | |||||
| if task.FlavorCode == "" { | |||||
| task.FlavorCode = result.Flavor | |||||
| } | |||||
| err = models.UpdateJob(task) | |||||
| if err != nil { | |||||
| log.Error("UpdateJob(%s) failed:%v", task.DisplayJobName, err) | |||||
| return err | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| */ | |||||
| @@ -0,0 +1,220 @@ | |||||
| package modelarts_cd | |||||
| import ( | |||||
| "bytes" | |||||
| "code.gitea.io/gitea/modules/modelarts_gateway/core" | |||||
| "crypto/tls" | |||||
| "encoding/json" | |||||
| "fmt" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "strconv" | |||||
| "time" | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| ) | |||||
| var ( | |||||
| httpClient *http.Client | |||||
| HOST string | |||||
| TOKEN string | |||||
| ) | |||||
| const ( | |||||
| errorCodeExceedLimit = "ModelArts.0118" | |||||
| //notebook 2.0 | |||||
| urlNotebook2 = "/notebooks" | |||||
| //error code | |||||
| modelartsIllegalToken = "ModelArts.6401" | |||||
| NotebookNotFound = "ModelArts.6404" | |||||
| NotebookNoPermission = "ModelArts.6407" | |||||
| NotebookInvalid = "ModelArts.6400" | |||||
| UnknownErrorPrefix = "UNKNOWN:" | |||||
| ) | |||||
| func getHttpClient() *http.Client { | |||||
| if httpClient == nil { | |||||
| httpClient = &http.Client{ | |||||
| Timeout: 30 * time.Second, | |||||
| Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, | |||||
| } | |||||
| } | |||||
| return httpClient | |||||
| } | |||||
| func GetNotebook(jobID string) (*models.GetNotebook2Result, error) { | |||||
| var result models.GetNotebook2Result | |||||
| client := getHttpClient() | |||||
| s := core.Signer{ | |||||
| Key: setting.ModelartsCD.AccessKey, | |||||
| Secret: setting.ModelartsCD.SecretKey, | |||||
| } | |||||
| r, _ := http.NewRequest(http.MethodGet, | |||||
| setting.ModelartsCD.EndPoint+"/v1/"+setting.ModelartsCD.ProjectID+urlNotebook2+"/"+jobID, | |||||
| nil) | |||||
| r.Header.Add("content-type", "application/json") | |||||
| s.Sign(r) | |||||
| resp, err := client.Do(r) | |||||
| if err != nil { | |||||
| log.Error("client.Do failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("client.Do failed: %s", err.Error()) | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| body, err := ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| log.Error("ioutil.ReadAll failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("ioutil.ReadAll failed: %s", err.Error()) | |||||
| } | |||||
| err = json.Unmarshal(body, &result) | |||||
| if err != nil { | |||||
| log.Error("json.Unmarshal failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) | |||||
| } | |||||
| if len(result.ErrorCode) != 0 { | |||||
| log.Error("GetNotebook failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| return &result, fmt.Errorf("GetNotebook failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| } | |||||
| return &result, nil | |||||
| } | |||||
| func ManageNotebook(jobID string, param models.NotebookAction) (*models.NotebookActionResult, error) { | |||||
| var result models.NotebookActionResult | |||||
| client := getHttpClient() | |||||
| s := core.Signer{ | |||||
| Key: setting.ModelartsCD.AccessKey, | |||||
| Secret: setting.ModelartsCD.SecretKey, | |||||
| } | |||||
| r, _ := http.NewRequest(http.MethodPost, | |||||
| setting.ModelartsCD.EndPoint+"/v1/"+setting.ModelartsCD.ProjectID+urlNotebook2+"/"+jobID+"/"+param.Action+"?duration="+strconv.Itoa(autoStopDurationMs), | |||||
| nil) | |||||
| r.Header.Add("content-type", "application/json") | |||||
| s.Sign(r) | |||||
| resp, err := client.Do(r) | |||||
| if err != nil { | |||||
| log.Error("client.Do failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("client.Do failed: %s", err.Error()) | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| body, err := ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| log.Error("ioutil.ReadAll failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("ioutil.ReadAll failed: %s", err.Error()) | |||||
| } | |||||
| err = json.Unmarshal(body, &result) | |||||
| if err != nil { | |||||
| log.Error("json.Unmarshal failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) | |||||
| } | |||||
| if len(result.ErrorCode) != 0 { | |||||
| log.Error("ManageNotebook2 failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| return &result, fmt.Errorf("ManageNotebook failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| } | |||||
| return &result, nil | |||||
| } | |||||
| func DelNotebook(jobID string) (*models.NotebookDelResult, error) { | |||||
| var result models.NotebookDelResult | |||||
| client := getHttpClient() | |||||
| s := core.Signer{ | |||||
| Key: setting.ModelartsCD.AccessKey, | |||||
| Secret: setting.ModelartsCD.SecretKey, | |||||
| } | |||||
| r, _ := http.NewRequest(http.MethodDelete, | |||||
| setting.ModelartsCD.EndPoint+"/v1/"+setting.ModelartsCD.ProjectID+urlNotebook2+"/"+jobID, | |||||
| nil) | |||||
| r.Header.Add("content-type", "application/json") | |||||
| s.Sign(r) | |||||
| resp, err := client.Do(r) | |||||
| if err != nil { | |||||
| log.Error("client.Do failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("client.Do failed: %s", err.Error()) | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| body, err := ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| log.Error("ioutil.ReadAll failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("ioutil.ReadAll failed: %s", err.Error()) | |||||
| } | |||||
| err = json.Unmarshal(body, &result) | |||||
| if err != nil { | |||||
| log.Error("json.Unmarshal failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) | |||||
| } | |||||
| if len(result.ErrorCode) != 0 { | |||||
| log.Error("DelNotebook2 failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| return &result, fmt.Errorf("DelNotebook2 failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| } | |||||
| return &result, nil | |||||
| } | |||||
| func createNotebook(createJobParams models.CreateNotebookWithoutPoolParams) (*models.CreateNotebookResult, error) { | |||||
| var result models.CreateNotebookResult | |||||
| client := getHttpClient() | |||||
| s := core.Signer{ | |||||
| Key: setting.ModelartsCD.AccessKey, | |||||
| Secret: setting.ModelartsCD.SecretKey, | |||||
| } | |||||
| req, _ := json.Marshal(createJobParams) | |||||
| r, _ := http.NewRequest(http.MethodPost, | |||||
| setting.ModelartsCD.EndPoint+"/v1/"+setting.ModelartsCD.ProjectID+urlNotebook2, | |||||
| ioutil.NopCloser(bytes.NewBuffer(req))) | |||||
| r.Header.Add("content-type", "application/json") | |||||
| s.Sign(r) | |||||
| resp, err := client.Do(r) | |||||
| if err != nil { | |||||
| log.Error("client.Do failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("client.Do failed: %s", err.Error()) | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| body, err := ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| log.Error("ioutil.ReadAll failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("ioutil.ReadAll failed: %s", err.Error()) | |||||
| } | |||||
| err = json.Unmarshal(body, &result) | |||||
| if err != nil { | |||||
| log.Error("json.Unmarshal failed: %s", err.Error()) | |||||
| return &result, fmt.Errorf("json.Unmarshal failed: %s", err.Error()) | |||||
| } | |||||
| if len(result.ErrorCode) != 0 { | |||||
| log.Error("createNotebook failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| if result.ErrorCode == errorCodeExceedLimit { | |||||
| result.ErrorMsg = "所选规格使用数量已超过最大配额限制。" | |||||
| } | |||||
| return &result, fmt.Errorf("createNotebook failed(%s): %s", result.ErrorCode, result.ErrorMsg) | |||||
| } | |||||
| return &result, nil | |||||
| } | |||||
| @@ -0,0 +1,42 @@ | |||||
| // based on https://github.com/golang/go/blob/master/src/net/url/url.go | |||||
| // Copyright 2009 The Go Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style | |||||
| // license that can be found in the LICENSE file. | |||||
| package core | |||||
| func shouldEscape(c byte) bool { | |||||
| if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c == '-' || c == '~' || c == '.' { | |||||
| return false | |||||
| } | |||||
| return true | |||||
| } | |||||
| func escape(s string) string { | |||||
| hexCount := 0 | |||||
| for i := 0; i < len(s); i++ { | |||||
| c := s[i] | |||||
| if shouldEscape(c) { | |||||
| hexCount++ | |||||
| } | |||||
| } | |||||
| if hexCount == 0 { | |||||
| return s | |||||
| } | |||||
| t := make([]byte, len(s)+2*hexCount) | |||||
| j := 0 | |||||
| for i := 0; i < len(s); i++ { | |||||
| switch c := s[i]; { | |||||
| case shouldEscape(c): | |||||
| t[j] = '%' | |||||
| t[j+1] = "0123456789ABCDEF"[c>>4] | |||||
| t[j+2] = "0123456789ABCDEF"[c&15] | |||||
| j += 3 | |||||
| default: | |||||
| t[j] = s[i] | |||||
| j++ | |||||
| } | |||||
| } | |||||
| return string(t) | |||||
| } | |||||
| @@ -0,0 +1,208 @@ | |||||
| // HWS API Gateway Signature | |||||
| // based on https://github.com/datastream/aws/blob/master/signv4.go | |||||
| // Copyright (c) 2014, Xianjie | |||||
| package core | |||||
| import ( | |||||
| "bytes" | |||||
| "crypto/hmac" | |||||
| "crypto/sha256" | |||||
| "fmt" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "sort" | |||||
| "strings" | |||||
| "time" | |||||
| ) | |||||
| const ( | |||||
| BasicDateFormat = "20060102T150405Z" | |||||
| Algorithm = "SDK-HMAC-SHA256" | |||||
| HeaderXDate = "X-Sdk-Date" | |||||
| HeaderHost = "host" | |||||
| HeaderAuthorization = "Authorization" | |||||
| HeaderContentSha256 = "X-Sdk-Content-Sha256" | |||||
| ) | |||||
| func hmacsha256(key []byte, data string) ([]byte, error) { | |||||
| h := hmac.New(sha256.New, []byte(key)) | |||||
| if _, err := h.Write([]byte(data)); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return h.Sum(nil), nil | |||||
| } | |||||
| // Build a CanonicalRequest from a regular request string | |||||
| // | |||||
| // CanonicalRequest = | |||||
| // HTTPRequestMethod + '\n' + | |||||
| // CanonicalURI + '\n' + | |||||
| // CanonicalQueryString + '\n' + | |||||
| // CanonicalHeaders + '\n' + | |||||
| // SignedHeaders + '\n' + | |||||
| // HexEncode(Hash(RequestPayload)) | |||||
| func CanonicalRequest(r *http.Request, signedHeaders []string) (string, error) { | |||||
| var hexencode string | |||||
| var err error | |||||
| if hex := r.Header.Get(HeaderContentSha256); hex != "" { | |||||
| hexencode = hex | |||||
| } else { | |||||
| data, err := RequestPayload(r) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| hexencode, err = HexEncodeSHA256Hash(data) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| } | |||||
| return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, CanonicalURI(r), CanonicalQueryString(r), CanonicalHeaders(r, signedHeaders), strings.Join(signedHeaders, ";"), hexencode), err | |||||
| } | |||||
| // CanonicalURI returns request uri | |||||
| func CanonicalURI(r *http.Request) string { | |||||
| pattens := strings.Split(r.URL.Path, "/") | |||||
| var uri []string | |||||
| for _, v := range pattens { | |||||
| uri = append(uri, escape(v)) | |||||
| } | |||||
| urlpath := strings.Join(uri, "/") | |||||
| if len(urlpath) == 0 || urlpath[len(urlpath)-1] != '/' { | |||||
| urlpath = urlpath + "/" | |||||
| } | |||||
| return urlpath | |||||
| } | |||||
| // CanonicalQueryString | |||||
| func CanonicalQueryString(r *http.Request) string { | |||||
| var keys []string | |||||
| query := r.URL.Query() | |||||
| for key := range query { | |||||
| keys = append(keys, key) | |||||
| } | |||||
| sort.Strings(keys) | |||||
| var a []string | |||||
| for _, key := range keys { | |||||
| k := escape(key) | |||||
| sort.Strings(query[key]) | |||||
| for _, v := range query[key] { | |||||
| kv := fmt.Sprintf("%s=%s", k, escape(v)) | |||||
| a = append(a, kv) | |||||
| } | |||||
| } | |||||
| queryStr := strings.Join(a, "&") | |||||
| r.URL.RawQuery = queryStr | |||||
| return queryStr | |||||
| } | |||||
| // CanonicalHeaders | |||||
| func CanonicalHeaders(r *http.Request, signerHeaders []string) string { | |||||
| var a []string | |||||
| header := make(map[string][]string) | |||||
| for k, v := range r.Header { | |||||
| header[strings.ToLower(k)] = v | |||||
| } | |||||
| for _, key := range signerHeaders { | |||||
| value := header[key] | |||||
| if strings.EqualFold(key, HeaderHost) { | |||||
| value = []string{r.Host} | |||||
| } | |||||
| sort.Strings(value) | |||||
| for _, v := range value { | |||||
| a = append(a, key+":"+strings.TrimSpace(v)) | |||||
| } | |||||
| } | |||||
| return fmt.Sprintf("%s\n", strings.Join(a, "\n")) | |||||
| } | |||||
| // SignedHeaders | |||||
| func SignedHeaders(r *http.Request) []string { | |||||
| var a []string | |||||
| for key := range r.Header { | |||||
| a = append(a, strings.ToLower(key)) | |||||
| } | |||||
| sort.Strings(a) | |||||
| return a | |||||
| } | |||||
| // RequestPayload | |||||
| func RequestPayload(r *http.Request) ([]byte, error) { | |||||
| if r.Body == nil { | |||||
| return []byte(""), nil | |||||
| } | |||||
| b, err := ioutil.ReadAll(r.Body) | |||||
| if err != nil { | |||||
| return []byte(""), err | |||||
| } | |||||
| r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) | |||||
| return b, err | |||||
| } | |||||
| // Create a "String to Sign". | |||||
| func StringToSign(canonicalRequest string, t time.Time) (string, error) { | |||||
| hash := sha256.New() | |||||
| _, err := hash.Write([]byte(canonicalRequest)) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| return fmt.Sprintf("%s\n%s\n%x", | |||||
| Algorithm, t.UTC().Format(BasicDateFormat), hash.Sum(nil)), nil | |||||
| } | |||||
| // Create the HWS Signature. | |||||
| func SignStringToSign(stringToSign string, signingKey []byte) (string, error) { | |||||
| hm, err := hmacsha256(signingKey, stringToSign) | |||||
| return fmt.Sprintf("%x", hm), err | |||||
| } | |||||
| // HexEncodeSHA256Hash returns hexcode of sha256 | |||||
| func HexEncodeSHA256Hash(body []byte) (string, error) { | |||||
| hash := sha256.New() | |||||
| if body == nil { | |||||
| body = []byte("") | |||||
| } | |||||
| _, err := hash.Write(body) | |||||
| return fmt.Sprintf("%x", hash.Sum(nil)), err | |||||
| } | |||||
| // Get the finalized value for the "Authorization" header. The signature parameter is the output from SignStringToSign | |||||
| func AuthHeaderValue(signature, accessKey string, signedHeaders []string) string { | |||||
| return fmt.Sprintf("%s Access=%s, SignedHeaders=%s, Signature=%s", Algorithm, accessKey, strings.Join(signedHeaders, ";"), signature) | |||||
| } | |||||
| // Signer holds the HWS access key pair used to sign requests | |||||
| type Signer struct { | |||||
| Key string | |||||
| Secret string | |||||
| } | |||||
| // Sign sets the X-Sdk-Date and Authorization headers on the request | |||||
| func (s *Signer) Sign(r *http.Request) error { | |||||
| var t time.Time | |||||
| var err error | |||||
| var dt string | |||||
| if dt = r.Header.Get(HeaderXDate); dt != "" { | |||||
| t, err = time.Parse(BasicDateFormat, dt) | |||||
| } | |||||
| if err != nil || dt == "" { | |||||
| t = time.Now() | |||||
| r.Header.Set(HeaderXDate, t.UTC().Format(BasicDateFormat)) | |||||
| } | |||||
| signedHeaders := SignedHeaders(r) | |||||
| canonicalRequest, err := CanonicalRequest(r, signedHeaders) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| stringToSign, err := StringToSign(canonicalRequest, t) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| signature, err := SignStringToSign(stringToSign, []byte(s.Secret)) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| authValue := AuthHeaderValue(signature, s.Key, signedHeaders) | |||||
| r.Header.Set(HeaderAuthorization, authValue) | |||||
| return nil | |||||
| } | |||||
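
End to end, the signer is used the same way the modelarts_cd client above uses it: build the request, set the content type, call Sign, then send. A condensed sketch with placeholder endpoint and credentials:

```go
package main

import (
	"fmt"
	"net/http"

	"code.gitea.io/gitea/modules/modelarts_gateway/core"
)

func main() {
	s := core.Signer{
		Key:    "EXAMPLE_ACCESS_KEY", // placeholder credentials
		Secret: "EXAMPLE_SECRET_KEY",
	}
	// Placeholder URL; real calls go to the configured ModelartsCD endpoint.
	r, _ := http.NewRequest(http.MethodGet,
		"https://modelarts.example.com/v1/project-id/notebooks/job-id", nil)
	r.Header.Add("content-type", "application/json")
	if err := s.Sign(r); err != nil { // fills in X-Sdk-Date and Authorization
		fmt.Println("sign failed:", err)
		return
	}
	fmt.Println(r.Header.Get(core.HeaderAuthorization))
}
```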
| @@ -5,6 +5,7 @@ | |||||
| package action | package action | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/auth" | |||||
| "encoding/json" | "encoding/json" | ||||
| "fmt" | "fmt" | ||||
| "path" | "path" | ||||
| @@ -345,3 +346,101 @@ func (a *actionNotifier) NotifyOtherTask(doer *models.User, repo *models.Reposit | |||||
| log.Error("notifyWatchers: %v", err) | log.Error("notifyWatchers: %v", err) | ||||
| } | } | ||||
| } | } | ||||
| func (t *actionNotifier) NotifyWechatBind(user *models.User, wechatOpenId string) { | |||||
| act := &models.Action{ | |||||
| ActUserID: user.ID, | |||||
| ActUser: user, | |||||
| OpType: models.ActionBindWechat, | |||||
| IsPrivate: true, | |||||
| Content: wechatOpenId, | |||||
| } | |||||
| if err := models.NotifyWatchers(act); err != nil { | |||||
| log.Error("notifyWatchers: %v", err) | |||||
| } | |||||
| } | |||||
| func (t *actionNotifier) NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) { | |||||
| switch action { | |||||
| case "recommend": | |||||
| users, err := models.GetAllDatasetContributorByDatasetId(dataset.ID) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| var actions = make([]*models.Action, 0) | |||||
| for _, user := range users { | |||||
| actions = append(actions, &models.Action{ | |||||
| OpType: models.ActionDatasetRecommended, | |||||
| ActUserID: user.ID, | |||||
| ActUser: user, | |||||
| RepoID: dataset.RepoID, | |||||
| Repo: dataset.Repo, | |||||
| Content: fmt.Sprintf("%d|%s", dataset.ID, dataset.Title), | |||||
| }) | |||||
| } | |||||
| if err := models.NotifyWatchers(actions...); err != nil { | |||||
| log.Error("notifyWatchers: %v", err) | |||||
| } | |||||
| } | |||||
| } | |||||
| func (t *actionNotifier) NotifyCreateImage(doer *models.User, image models.Image) { | |||||
| act := &models.Action{ | |||||
| ActUserID: doer.ID, | |||||
| ActUser: doer, | |||||
| OpType: models.ActionCreateImage, | |||||
| IsPrivate: image.IsPrivate, | |||||
| Content: fmt.Sprintf("%d|%s", image.ID, image.Tag), | |||||
| } | |||||
| if err := models.NotifyWatchers(act); err != nil { | |||||
| log.Error("notifyWatchers: %v", err) | |||||
| } | |||||
| } | |||||
| func (t *actionNotifier) NotifyImageRecommend(optUser *models.User, image *models.Image, action string) { | |||||
| u, err := models.GetUserByID(image.UID) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| switch action { | |||||
| case "recommend": | |||||
| act := &models.Action{ | |||||
| ActUserID: u.ID, | |||||
| ActUser: u, | |||||
| OpType: models.ActionImageRecommend, | |||||
| IsPrivate: false, | |||||
| Content: fmt.Sprintf("%d|%s", image.ID, image.Tag), | |||||
| } | |||||
| if err := models.NotifyWatchers(act); err != nil { | |||||
| log.Error("notifyWatchers: %v", err) | |||||
| } | |||||
| } | |||||
| } | |||||
| func (t *actionNotifier) NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) { | |||||
| act := &models.Action{ | |||||
| ActUserID: user.ID, | |||||
| ActUser: user, | |||||
| OpType: models.ActionChangeUserAvatar, | |||||
| IsPrivate: true, | |||||
| } | |||||
| if err := models.NotifyWatchers(act); err != nil { | |||||
| log.Error("notifyWatchers: %v", err) | |||||
| } | |||||
| } | |||||
| func (t *actionNotifier) NotifyPushCommits(pusher *models.User, repo *models.Repository, refName, oldCommitID, newCommitID string, commits *repository.PushCommits) { | |||||
| act := &models.Action{ | |||||
| ActUserID: pusher.ID, | |||||
| ActUser: pusher, | |||||
| OpType: models.ActionPushCommits, | |||||
| RepoID: repo.ID, | |||||
| Repo: repo, | |||||
| RefName: refName, | |||||
| IsPrivate: repo.IsPrivate, | |||||
| Content: fmt.Sprintf("%s|%s", oldCommitID, newCommitID), | |||||
| } | |||||
| if err := models.NotifyWatchers(act); err != nil { | |||||
| log.Error("notifyWatchers: %v", err) | |||||
| } | |||||
| } | |||||
| @@ -6,6 +6,7 @@ package base | |||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
| "code.gitea.io/gitea/modules/auth" | |||||
| "code.gitea.io/gitea/modules/repository" | "code.gitea.io/gitea/modules/repository" | ||||
| ) | ) | ||||
| @@ -56,6 +57,11 @@ type Notifier interface { | |||||
| NotifySyncDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string) | NotifySyncDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string) | ||||
| NotifyOtherTask(doer *models.User, repo *models.Repository, id string, name string, optype models.ActionType) | NotifyOtherTask(doer *models.User, repo *models.Repository, id string, name string, optype models.ActionType) | ||||
| NotifyWechatBind(user *models.User, wechatOpenId string) | |||||
| NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) | |||||
| NotifyCreateImage(doer *models.User, image models.Image) | |||||
| NotifyImageRecommend(optUser *models.User, image *models.Image, action string) | |||||
| NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) | |||||
| NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) | NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) | ||||
| } | } | ||||
| @@ -6,6 +6,7 @@ package base | |||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
| "code.gitea.io/gitea/modules/auth" | |||||
| "code.gitea.io/gitea/modules/repository" | "code.gitea.io/gitea/modules/repository" | ||||
| ) | ) | ||||
| @@ -159,6 +160,23 @@ func (*NullNotifier) NotifyOtherTask(doer *models.User, repo *models.Repository, | |||||
| } | } | ||||
| func (*NullNotifier) NotifyWechatBind(user *models.User, wechatOpenId string) { | |||||
| } | |||||
| func (*NullNotifier) NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) { | |||||
| } | |||||
| func (*NullNotifier) NotifyCreateImage(doer *models.User, image models.Image) { | |||||
| } | |||||
| func (*NullNotifier) NotifyImageRecommend(optUser *models.User, image *models.Image, action string) { | |||||
| } | |||||
| func (*NullNotifier) NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) { | |||||
| } | |||||
| func (*NullNotifier) NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) { | func (*NullNotifier) NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) { | ||||
| } | } | ||||
| @@ -6,10 +6,12 @@ package notification | |||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
| "code.gitea.io/gitea/modules/auth" | |||||
| "code.gitea.io/gitea/modules/notification/action" | "code.gitea.io/gitea/modules/notification/action" | ||||
| "code.gitea.io/gitea/modules/notification/base" | "code.gitea.io/gitea/modules/notification/base" | ||||
| "code.gitea.io/gitea/modules/notification/indexer" | "code.gitea.io/gitea/modules/notification/indexer" | ||||
| "code.gitea.io/gitea/modules/notification/mail" | "code.gitea.io/gitea/modules/notification/mail" | ||||
| "code.gitea.io/gitea/modules/notification/reward" | |||||
| "code.gitea.io/gitea/modules/notification/ui" | "code.gitea.io/gitea/modules/notification/ui" | ||||
| "code.gitea.io/gitea/modules/notification/webhook" | "code.gitea.io/gitea/modules/notification/webhook" | ||||
| wechatNotifier "code.gitea.io/gitea/modules/notification/wechat" | wechatNotifier "code.gitea.io/gitea/modules/notification/wechat" | ||||
| @@ -37,6 +39,7 @@ func NewContext() { | |||||
| RegisterNotifier(webhook.NewNotifier()) | RegisterNotifier(webhook.NewNotifier()) | ||||
| RegisterNotifier(action.NewNotifier()) | RegisterNotifier(action.NewNotifier()) | ||||
| RegisterNotifier(wechatNotifier.NewNotifier()) | RegisterNotifier(wechatNotifier.NewNotifier()) | ||||
| RegisterNotifier(reward.NewNotifier()) | |||||
| } | } | ||||
| // NotifyUploadAttachment notifies attachment upload message to notifiers | // NotifyUploadAttachment notifies attachment upload message to notifiers | ||||
| @@ -272,6 +275,41 @@ func NotifySyncDeleteRef(pusher *models.User, repo *models.Repository, refType, | |||||
| } | } | ||||
| } | } | ||||
| // NotifyWechatBind notifies wechat bind | |||||
| func NotifyWechatBind(user *models.User, wechatOpenId string) { | |||||
| for _, notifier := range notifiers { | |||||
| notifier.NotifyWechatBind(user, wechatOpenId) | |||||
| } | |||||
| } | |||||
| // NotifyDatasetRecommend notifies a dataset recommend action to all notifiers | |||||
| func NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) { | |||||
| for _, notifier := range notifiers { | |||||
| notifier.NotifyDatasetRecommend(optUser, dataset, action) | |||||
| } | |||||
| } | |||||
| // NotifyCreateImage notifies image creation to all notifiers | |||||
| func NotifyCreateImage(doer *models.User, image models.Image) { | |||||
| for _, notifier := range notifiers { | |||||
| notifier.NotifyCreateImage(doer, image) | |||||
| } | |||||
| } | |||||
| // NotifyImageRecommend notifies an image recommend action to all notifiers | |||||
| func NotifyImageRecommend(optUser *models.User, image *models.Image, action string) { | |||||
| for _, notifier := range notifiers { | |||||
| notifier.NotifyImageRecommend(optUser, image, action) | |||||
| } | |||||
| } | |||||
| // NotifyChangeUserAvatar notifies a user avatar change to all notifiers | |||||
| func NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) { | |||||
| for _, notifier := range notifiers { | |||||
| notifier.NotifyChangeUserAvatar(user, form) | |||||
| } | |||||
| } | |||||
| // NotifyChangeCloudbrainStatus | // NotifyChangeCloudbrainStatus | ||||
| func NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) { | func NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) { | ||||
| for _, notifier := range notifiers { | for _, notifier := range notifiers { | ||||
| @@ -0,0 +1,27 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/notification/base" | |||||
| ) | |||||
| type pointNotifier struct { | |||||
| base.NullNotifier | |||||
| } | |||||
| var ( | |||||
| _ base.Notifier = &pointNotifier{} | |||||
| ) | |||||
| // NewNotifier creates a new pointNotifier | |||||
| func NewNotifier() base.Notifier { | |||||
| return &pointNotifier{} | |||||
| } | |||||
| func (*pointNotifier) NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) { | |||||
| log.Info("pointNotifier NotifyChangeCloudbrainStatus cloudbrain.id=%d cloudbrain.status=%s oldStatus=%s", cloudbrain.ID, cloudbrain.Status, oldStatus) | |||||
| if cloudbrain.IsRunning() || cloudbrain.IsTerminal() { | |||||
| models.StatusChangeChan <- cloudbrain | |||||
| } | |||||
| } | |||||
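The new reward notifier only overrides NotifyChangeCloudbrainStatus; everything else falls through to the embedded base.NullNotifier. If further point-earning events need to reach the reward service, they can be hooked the same way. A hypothetical sketch (this override is illustrative and not part of the change; the models.Image field names are assumed):

```go
// Hypothetical extension: because pointNotifier embeds base.NullNotifier, adding
// another hook is just a matter of overriding the corresponding interface method.
func (*pointNotifier) NotifyCreateImage(doer *models.User, image models.Image) {
	log.Info("pointNotifier NotifyCreateImage doer.id=%d image.id=%d", doer.ID, image.ID)
	// A real implementation would hand the event to the reward/point service here,
	// analogous to how the status change above is pushed into models.StatusChangeChan.
}
```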
| @@ -76,7 +76,7 @@ func HEXISTS(conn redis.Conn, key string, subKey string) (bool, error) { | |||||
| } | } | ||||
| func Expire(conn redis.Conn, key string, seconds int) error { | |||||
| func EXPIRE(conn redis.Conn, key string, seconds int) error { | |||||
| _, err := conn.Do("EXPIRE", key, seconds) | _, err := conn.Do("EXPIRE", key, seconds) | ||||
| return err | return err | ||||
| @@ -145,3 +145,85 @@ func TTL(key string) (int, error) { | |||||
| return n, nil | return n, nil | ||||
| } | } | ||||
| func IncrBy(key string, n int64) (int64, error) { | |||||
| redisClient := labelmsg.Get() | |||||
| defer redisClient.Close() | |||||
| reply, err := redisClient.Do("INCRBY", key, n) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| i, err := strconv.ParseInt(fmt.Sprint(reply), 10, 64) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return i, nil | |||||
| } | |||||
| func Expire(key string, expireTime time.Duration) error { | |||||
| redisClient := labelmsg.Get() | |||||
| defer redisClient.Close() | |||||
| _, err := redisClient.Do("EXPIRE", key, int64(expireTime.Seconds())) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // GetInt64 gets the redis value by Get(key) | |||||
| // and then parses the value to int64. | |||||
| // It returns {isExist(bool)} {value(int64)} {error(error)}. | |||||
| func GetInt64(key string) (bool, int64, error) { | |||||
| str, err := Get(key) | |||||
| if err != nil { | |||||
| return false, 0, err | |||||
| } | |||||
| if str == "" { | |||||
| return false, 0, nil | |||||
| } | |||||
| i, err := strconv.ParseInt(str, 10, 64) | |||||
| if err != nil { | |||||
| return false, 0, err | |||||
| } | |||||
| return true, i, nil | |||||
| } | |||||
| func ZAdd(key, value string, score float64) error { | |||||
| redisClient := labelmsg.Get() | |||||
| defer redisClient.Close() | |||||
| _, err := redisClient.Do("ZADD", key, score, value) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func ZRangeByScore(key string, min, max float64) ([]string, error) { | |||||
| redisClient := labelmsg.Get() | |||||
| defer redisClient.Close() | |||||
| reply, err := redisClient.Do("ZRANGEBYSCORE", key, min, max) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if reply == nil { | |||||
| return nil, nil | |||||
| } | |||||
| s, err := redis.Strings(reply, nil) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return s, nil | |||||
| } | |||||
| func ZRemRangeByScore(key string, min, max float64) error { | |||||
| redisClient := labelmsg.Get() | |||||
| defer redisClient.Close() | |||||
| _, err := redisClient.Do("ZREMRANGEBYSCORE", key, min, max) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
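The three sorted-set helpers are enough to build a score-as-timestamp delay queue: enqueue with the due time as the score, read everything whose score is in the past, then prune that range. A minimal sketch of that pattern, assuming it lives in the same package as the helpers; the key name and payload format are made up:

```go
// scheduleReward makes a payload visible at a given time by using the due
// timestamp as the sorted-set score. Key name is illustrative only.
func scheduleReward(payload string, dueAt time.Time) error {
	return ZAdd("reward:due:queue", payload, float64(dueAt.Unix()))
}

// pollDueRewards fetches everything whose due time has passed, then prunes it.
// Note the simple fetch-then-remove is not atomic; a real poller would need a
// lock or a Lua script if concurrent pollers are possible.
func pollDueRewards() ([]string, error) {
	now := float64(time.Now().Unix())
	due, err := ZRangeByScore("reward:due:queue", 0, now)
	if err != nil {
		return nil, err
	}
	if err := ZRemRangeByScore("reward:due:queue", 0, now); err != nil {
		return nil, err
	}
	return due, nil
}
```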
| @@ -0,0 +1,17 @@ | |||||
| package redis_key | |||||
| import "fmt" | |||||
| const ACCOUNT_REDIS_PREFIX = "account" | |||||
| func PointAccountOperateLock(userId int64) string { | |||||
| return KeyJoin(ACCOUNT_REDIS_PREFIX, fmt.Sprint(userId), "point", "operate", "lock") | |||||
| } | |||||
| func PointAccountInfo(userId int64) string { | |||||
| return KeyJoin(ACCOUNT_REDIS_PREFIX, fmt.Sprint(userId), "info") | |||||
| } | |||||
| func PointAccountInitLock(userId int64) string { | |||||
| return KeyJoin(ACCOUNT_REDIS_PREFIX, fmt.Sprint(userId), "init", "lock") | |||||
| } | |||||
| @@ -4,6 +4,8 @@ import "strings" | |||||
| const KEY_SEPARATE = ":" | const KEY_SEPARATE = ":" | ||||
| const EMPTY_REDIS_VAL = "Nil" | |||||
| func KeyJoin(keys ...string) string { | func KeyJoin(keys ...string) string { | ||||
| var build strings.Builder | var build strings.Builder | ||||
| for _, v := range keys { | for _, v := range keys { | ||||
| @@ -0,0 +1,26 @@ | |||||
| package redis_key | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "fmt" | |||||
| ) | |||||
| const LIMIT_REDIS_PREFIX = "limit" | |||||
| func LimitCount(userId int64, limitCode string, limitType string, scope string, period *models.PeriodResult) string { | |||||
| if scope == models.LimitScopeAllUsers.Name() { | |||||
| if period == nil { | |||||
| return KeyJoin(LIMIT_REDIS_PREFIX, limitCode, limitType, "count") | |||||
| } | |||||
| return KeyJoin(LIMIT_REDIS_PREFIX, limitCode, limitType, fmt.Sprint(period.StartTime.Unix()), fmt.Sprint(period.EndTime.Unix()), "count") | |||||
| } | |||||
| if period == nil { | |||||
| return KeyJoin(LIMIT_REDIS_PREFIX, "uid", fmt.Sprint(userId), limitCode, limitType, "count") | |||||
| } | |||||
| return KeyJoin(LIMIT_REDIS_PREFIX, "uid", fmt.Sprint(userId), limitCode, limitType, fmt.Sprint(period.StartTime.Unix()), fmt.Sprint(period.EndTime.Unix()), "count") | |||||
| } | |||||
| func LimitConfig(limitType string) string { | |||||
| return KeyJoin(LIMIT_REDIS_PREFIX, limitType, "config") | |||||
| } | |||||
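LimitCount produces one of two key shapes depending on scope and period. A sketch of the resulting keys, assuming KeyJoin simply joins its arguments with ":"; the limitCode/limitType/scope values below are made up:

```go
// Illustrative only; assumes KeyJoin concatenates its parts with ":".
// Any scope other than models.LimitScopeAllUsers.Name() selects the per-user form.
//
//   per-user, no period : limit:uid:42:<limitCode>:<limitType>:count
//   all users, periodic : limit:<limitCode>:<limitType>:<startUnix>:<endUnix>:count
func exampleLimitKey() string {
	// limitCode, limitType and scope values here are made up for illustration.
	return LimitCount(42, "CLOUDBRAIN_DEBUG_DURATION", "DAILY", "PER_USER", nil)
}
```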
| @@ -0,0 +1,21 @@ | |||||
| package redis_key | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "fmt" | |||||
| "strings" | |||||
| ) | |||||
| const REWARD_REDIS_PREFIX = "reward" | |||||
| func RewardOperateLock(requestId string, sourceType string, operateType string) string { | |||||
| return KeyJoin(REWARD_REDIS_PREFIX, requestId, sourceType, operateType, "send") | |||||
| } | |||||
| func RewardOperateNotification() string { | |||||
| return KeyJoin(REWARD_REDIS_PREFIX, "operate", strings.ReplaceAll(setting.AppURL, "/", ""), "notification") | |||||
| } | |||||
| func RewardTaskRunningLock(taskId int64) string { | |||||
| return KeyJoin(REWARD_REDIS_PREFIX, "periodic_task", fmt.Sprint(taskId), "lock") | |||||
| } | |||||
| @@ -0,0 +1,10 @@ | |||||
| package redis_key | |||||
| import "time" | |||||
| const SERIAL_REDIS_PREFIX = "serial" | |||||
| func RewardSerialCounter(now time.Time) string { | |||||
| h := now.Format("200601021504") | |||||
| return KeyJoin(SERIAL_REDIS_PREFIX, "reward_operate", h, "counter") | |||||
| } | |||||
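The counter key is bucketed per minute (layout 200601021504). Presumably it is meant to be paired with an atomic INCRBY so serial suffixes are unique within each minute; a hedged sketch of that pairing (the redis_client package path, increment step and expiry are assumptions, not shown in this hunk):

```go
// nextSerialSuffix sketches how the per-minute counter key could be combined
// with INCRBY to hand out a serial suffix that is unique within the minute.
func nextSerialSuffix(now time.Time) (int64, error) {
	key := RewardSerialCounter(now)
	n, err := redis_client.IncrBy(key, 1) // assumed package path for the redis client helpers
	if err != nil {
		return 0, err
	}
	// Let stale minute buckets clean themselves up; 10 minutes is an arbitrary choice.
	_ = redis_client.Expire(key, 10*time.Minute)
	return n, nil
}
```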
| @@ -0,0 +1,14 @@ | |||||
| package redis_key | |||||
| const TASK_REDIS_PREFIX = "task" | |||||
| func TaskAccomplishLock(sourceId string, taskType string) string { | |||||
| return KeyJoin(TASK_REDIS_PREFIX, sourceId, taskType, "accomplish") | |||||
| } | |||||
| func TaskConfigList() string { | |||||
| return KeyJoin(TASK_REDIS_PREFIX, "config", "list") | |||||
| } | |||||
| func TaskConfigOperateLock(taskCode, rewardType string) string { | |||||
| return KeyJoin(TASK_REDIS_PREFIX, "config", "operate", "lock") | |||||
| } | |||||
| @@ -13,26 +13,32 @@ func NewDistributeLock(lockKey string) *DistributeLock { | |||||
| return &DistributeLock{lockKey: lockKey} | return &DistributeLock{lockKey: lockKey} | ||||
| } | } | ||||
| func (lock *DistributeLock) Lock(expireTime time.Duration) bool { | |||||
| isOk, _ := redis_client.Setnx(lock.lockKey, "", expireTime) | |||||
| return isOk | |||||
| func (lock *DistributeLock) Lock(expireTime time.Duration) (bool, error) { | |||||
| isOk, err := redis_client.Setnx(lock.lockKey, "", expireTime) | |||||
| if err != nil { | |||||
| return false, err | |||||
| } | |||||
| return isOk, nil | |||||
| } | } | ||||
| func (lock *DistributeLock) LockWithWait(expireTime time.Duration, waitTime time.Duration) bool { | |||||
| func (lock *DistributeLock) LockWithWait(expireTime time.Duration, waitTime time.Duration) (bool, error) { | |||||
| start := time.Now().Unix() * 1000 | start := time.Now().Unix() * 1000 | ||||
| duration := waitTime.Milliseconds() | duration := waitTime.Milliseconds() | ||||
| for { | for { | ||||
| isOk, _ := redis_client.Setnx(lock.lockKey, "", expireTime) | |||||
| isOk, err := redis_client.Setnx(lock.lockKey, "", expireTime) | |||||
| if err != nil { | |||||
| return false, err | |||||
| } | |||||
| if isOk { | if isOk { | ||||
| return true | |||||
| return true, nil | |||||
| } | } | ||||
| if time.Now().Unix()*1000-start > duration { | if time.Now().Unix()*1000-start > duration { | ||||
| return false | |||||
| return false, nil | |||||
| } | } | ||||
| time.Sleep(50 * time.Millisecond) | time.Sleep(50 * time.Millisecond) | ||||
| } | } | ||||
| return false | |||||
| return false, nil | |||||
| } | } | ||||
| func (lock *DistributeLock) UnLock() error { | func (lock *DistributeLock) UnLock() error { | ||||
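With Lock and LockWithWait now returning (bool, error), callers can distinguish "lock held by someone else" from "redis unavailable" instead of treating both as a failure to acquire. A caller sketch under the new contract; the key helper, timeouts and error message are illustrative:

```go
// withPointAccountLock runs f only after acquiring the distributed lock for the
// user's point account. Key helper and timeouts are illustrative choices.
func withPointAccountLock(userId int64, f func() error) error {
	lock := NewDistributeLock(redis_key.PointAccountOperateLock(userId))
	ok, err := lock.LockWithWait(5*time.Second, 2*time.Second)
	if err != nil {
		// Redis failure: surface it rather than silently treating it as "not acquired".
		return err
	}
	if !ok {
		return errors.New("another operation on this point account is in progress")
	}
	defer lock.UnLock()
	return f()
}
```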
| @@ -76,6 +76,26 @@ type C2NetSqInfos struct { | |||||
| C2NetSqInfo []*C2NetSequenceInfo `json:"sequence"` | C2NetSqInfo []*C2NetSequenceInfo `json:"sequence"` | ||||
| } | } | ||||
| type StFlavorInfos struct { | |||||
| FlavorInfo []*FlavorInfo `json:"flavor_info"` | |||||
| } | |||||
| type FlavorInfo struct { | |||||
| Id int `json:"id"` | |||||
| Value string `json:"value"` | |||||
| Desc string `json:"desc"` | |||||
| } | |||||
| type StImageInfosModelArts struct { | |||||
| ImageInfo []*ImageInfoModelArts `json:"image_info"` | |||||
| } | |||||
| type ImageInfoModelArts struct { | |||||
| Id string `json:"id"` | |||||
| Value string `json:"value"` | |||||
| Desc string `json:"desc"` | |||||
| } | |||||
| var ( | var ( | ||||
| // AppVer settings | // AppVer settings | ||||
| AppVer string | AppVer string | ||||
| @@ -195,10 +215,11 @@ var ( | |||||
| UseServiceWorker bool | UseServiceWorker bool | ||||
| Notification struct { | Notification struct { | ||||
| MinTimeout time.Duration | |||||
| TimeoutStep time.Duration | |||||
| MaxTimeout time.Duration | |||||
| EventSourceUpdateTime time.Duration | |||||
| MinTimeout time.Duration | |||||
| TimeoutStep time.Duration | |||||
| MaxTimeout time.Duration | |||||
| EventSourceUpdateTime time.Duration | |||||
| RewardNotifyUpdateTime time.Duration | |||||
| } `ini:"ui.notification"` | } `ini:"ui.notification"` | ||||
| Admin struct { | Admin struct { | ||||
| @@ -232,15 +253,17 @@ var ( | |||||
| Themes: []string{`gitea`, `arc-green`}, | Themes: []string{`gitea`, `arc-green`}, | ||||
| Reactions: []string{`+1`, `-1`, `laugh`, `hooray`, `confused`, `heart`, `rocket`, `eyes`}, | Reactions: []string{`+1`, `-1`, `laugh`, `hooray`, `confused`, `heart`, `rocket`, `eyes`}, | ||||
| Notification: struct { | Notification: struct { | ||||
| MinTimeout time.Duration | |||||
| TimeoutStep time.Duration | |||||
| MaxTimeout time.Duration | |||||
| EventSourceUpdateTime time.Duration | |||||
| MinTimeout time.Duration | |||||
| TimeoutStep time.Duration | |||||
| MaxTimeout time.Duration | |||||
| EventSourceUpdateTime time.Duration | |||||
| RewardNotifyUpdateTime time.Duration | |||||
| }{ | }{ | ||||
| MinTimeout: 10 * time.Second, | |||||
| TimeoutStep: 10 * time.Second, | |||||
| MaxTimeout: 60 * time.Second, | |||||
| EventSourceUpdateTime: 10 * time.Second, | |||||
| MinTimeout: 10 * time.Second, | |||||
| TimeoutStep: 10 * time.Second, | |||||
| MaxTimeout: 60 * time.Second, | |||||
| EventSourceUpdateTime: 10 * time.Second, | |||||
| RewardNotifyUpdateTime: 2 * time.Second, | |||||
| }, | }, | ||||
| Admin: struct { | Admin: struct { | ||||
| UserPagingNum int | UserPagingNum int | ||||
| @@ -536,20 +559,32 @@ var ( | |||||
| AllowedOrg string | AllowedOrg string | ||||
| ProfileID string | ProfileID string | ||||
| PoolInfos string | PoolInfos string | ||||
| Flavor string | |||||
| FlavorInfos string | |||||
| DebugHost string | DebugHost string | ||||
| ImageInfos string | ImageInfos string | ||||
| Capacity int | Capacity int | ||||
| MaxTempQueryTimes int | MaxTempQueryTimes int | ||||
| StFlavorInfo *StFlavorInfos | |||||
| StImageInfos *StImageInfosModelArts | |||||
| //train-job | //train-job | ||||
| ResourcePools string | ResourcePools string | ||||
| Engines string | Engines string | ||||
| EngineVersions string | EngineVersions string | ||||
| FlavorInfos string | |||||
| TrainJobFLAVORINFOS string | TrainJobFLAVORINFOS string | ||||
| ModelArtsSpecialPools string | ModelArtsSpecialPools string | ||||
| ModelArtsMultiNode string | ModelArtsMultiNode string | ||||
| // modelarts-cd config | |||||
| ModelartsCD = struct { | |||||
| Enabled bool | |||||
| EndPoint string | |||||
| ProjectID string | |||||
| AccessKey string | |||||
| SecretKey string | |||||
| ImageInfos string | |||||
| FlavorInfos string | |||||
| }{} | |||||
| //grampus config | //grampus config | ||||
| Grampus = struct { | Grampus = struct { | ||||
| Env string | Env string | ||||
| @@ -579,6 +614,13 @@ var ( | |||||
| WechatQRCodeExpireSeconds int | WechatQRCodeExpireSeconds int | ||||
| WechatAuthSwitch bool | WechatAuthSwitch bool | ||||
| //point config | |||||
| CloudBrainPaySwitch bool | |||||
| CloudBrainPayDelay time.Duration | |||||
| CloudBrainPayInterval time.Duration | |||||
| DeductTaskRange time.Duration | |||||
| DeductTaskRangeForFirst time.Duration | |||||
| //wechat auto reply config | //wechat auto reply config | ||||
| UserNameOfWechatReply string | UserNameOfWechatReply string | ||||
| RepoNameOfWechatReply string | RepoNameOfWechatReply string | ||||
| @@ -1424,9 +1466,8 @@ func NewContext() { | |||||
| AllowedOrg = sec.Key("ORGANIZATION").MustString("") | AllowedOrg = sec.Key("ORGANIZATION").MustString("") | ||||
| ProfileID = sec.Key("PROFILE_ID").MustString("") | ProfileID = sec.Key("PROFILE_ID").MustString("") | ||||
| PoolInfos = sec.Key("POOL_INFOS").MustString("") | PoolInfos = sec.Key("POOL_INFOS").MustString("") | ||||
| Flavor = sec.Key("FLAVOR").MustString("") | |||||
| ImageInfos = sec.Key("IMAGE_INFOS").MustString("") | ImageInfos = sec.Key("IMAGE_INFOS").MustString("") | ||||
| Capacity = sec.Key("IMAGE_INFOS").MustInt(100) | |||||
| Capacity = sec.Key("CAPACITY").MustInt(100) | |||||
| MaxTempQueryTimes = sec.Key("MAX_TEMP_QUERY_TIMES").MustInt(30) | MaxTempQueryTimes = sec.Key("MAX_TEMP_QUERY_TIMES").MustInt(30) | ||||
| ResourcePools = sec.Key("Resource_Pools").MustString("") | ResourcePools = sec.Key("Resource_Pools").MustString("") | ||||
| Engines = sec.Key("Engines").MustString("") | Engines = sec.Key("Engines").MustString("") | ||||
| @@ -1451,21 +1492,28 @@ func NewContext() { | |||||
| WechatAppId = sec.Key("APP_ID").MustString("wxba77b915a305a57d") | WechatAppId = sec.Key("APP_ID").MustString("wxba77b915a305a57d") | ||||
| WechatAppSecret = sec.Key("APP_SECRET").MustString("") | WechatAppSecret = sec.Key("APP_SECRET").MustString("") | ||||
| WechatQRCodeExpireSeconds = sec.Key("QR_CODE_EXPIRE_SECONDS").MustInt(120) | WechatQRCodeExpireSeconds = sec.Key("QR_CODE_EXPIRE_SECONDS").MustInt(120) | ||||
| WechatAuthSwitch = sec.Key("AUTH_SWITCH").MustBool(true) | |||||
| WechatAuthSwitch = sec.Key("AUTH_SWITCH").MustBool(false) | |||||
| UserNameOfWechatReply = sec.Key("AUTO_REPLY_USER_NAME").MustString("OpenIOSSG") | UserNameOfWechatReply = sec.Key("AUTO_REPLY_USER_NAME").MustString("OpenIOSSG") | ||||
| RepoNameOfWechatReply = sec.Key("AUTO_REPLY_REPO_NAME").MustString("promote") | RepoNameOfWechatReply = sec.Key("AUTO_REPLY_REPO_NAME").MustString("promote") | ||||
| RefNameOfWechatReply = sec.Key("AUTO_REPLY_REF_NAME").MustString("master") | RefNameOfWechatReply = sec.Key("AUTO_REPLY_REF_NAME").MustString("master") | ||||
| TreePathOfAutoMsgReply = sec.Key("AUTO_REPLY_TREE_PATH").MustString("wechat/auto_reply.json") | TreePathOfAutoMsgReply = sec.Key("AUTO_REPLY_TREE_PATH").MustString("wechat/auto_reply.json") | ||||
| TreePathOfSubscribe = sec.Key("SUBSCRIBE_TREE_PATH").MustString("wechat/subscribe_reply.json") | TreePathOfSubscribe = sec.Key("SUBSCRIBE_TREE_PATH").MustString("wechat/subscribe_reply.json") | ||||
| CloudbrainStartedTemplateId = sec.Key("CLOUDBRAIN_STARTED_TEMPLATE_ID").MustString("") | CloudbrainStartedTemplateId = sec.Key("CLOUDBRAIN_STARTED_TEMPLATE_ID").MustString("") | ||||
| CloudbrainStartedNotifyList = strings.Split(sec.Key("CLOUDBRAIN_STARTED_NOTIFY_LIST").MustString("DEBUG"), ",") | CloudbrainStartedNotifyList = strings.Split(sec.Key("CLOUDBRAIN_STARTED_NOTIFY_LIST").MustString("DEBUG"), ",") | ||||
| CloudbrainStartedTitle = sec.Key("CLOUDBRAIN_STARTED_TITLE").MustString("您好,您提交的算力资源申请已通过,任务已启动,请您关注运行情况。") | CloudbrainStartedTitle = sec.Key("CLOUDBRAIN_STARTED_TITLE").MustString("您好,您提交的算力资源申请已通过,任务已启动,请您关注运行情况。") | ||||
| CloudbrainStartedRemark = sec.Key("CLOUDBRAIN_STARTED_REMARK").MustString("感谢您的耐心等待。") | CloudbrainStartedRemark = sec.Key("CLOUDBRAIN_STARTED_REMARK").MustString("感谢您的耐心等待。") | ||||
| CloudbrainStoppedTemplateId = sec.Key("CLOUDBRAIN_STOPPED_TEMPLATE_ID").MustString("") | CloudbrainStoppedTemplateId = sec.Key("CLOUDBRAIN_STOPPED_TEMPLATE_ID").MustString("") | ||||
| CloudbrainStoppedNotifyList = strings.Split(sec.Key("CLOUDBRAIN_STOPPED_NOTIFY_LIST").MustString("TRAIN"), ",") | CloudbrainStoppedNotifyList = strings.Split(sec.Key("CLOUDBRAIN_STOPPED_NOTIFY_LIST").MustString("TRAIN"), ",") | ||||
| CloudbrainStoppedTitle = sec.Key("CLOUDBRAIN_STOPPED_TITLE").MustString("您好,您申请的算力资源已结束使用,任务已完成运行,请您关注运行结果。") | |||||
| CloudbrainStoppedTitle = sec.Key("CLOUDBRAIN_STOPPED_TITLE").MustString("您好,您申请的算力资源已结束使用,任务已完成运行,状态为%s,请您关注运行结果") | |||||
| CloudbrainStoppedRemark = sec.Key("CLOUDBRAIN_STOPPED_REMARK").MustString("感谢您的耐心等待。") | CloudbrainStoppedRemark = sec.Key("CLOUDBRAIN_STOPPED_REMARK").MustString("感谢您的耐心等待。") | ||||
| sec = Cfg.Section("point") | |||||
| CloudBrainPaySwitch = sec.Key("CLOUDBRAIN_PAY_SWITCH").MustBool(false) | |||||
| CloudBrainPayDelay = sec.Key("CLOUDBRAIN_PAY_DELAY").MustDuration(30 * time.Minute) | |||||
| CloudBrainPayInterval = sec.Key("CLOUDBRAIN_PAY_INTERVAL").MustDuration(60 * time.Minute) | |||||
| DeductTaskRange = sec.Key("DEDUCT_TASK_RANGE").MustDuration(30 * time.Minute) | |||||
| DeductTaskRangeForFirst = sec.Key("DEDUCT_TASK_RANGE_FOR_FIRST").MustDuration(3 * time.Hour) | |||||
| SetRadarMapConfig() | SetRadarMapConfig() | ||||
| sec = Cfg.Section("warn_mail") | sec = Cfg.Section("warn_mail") | ||||
| @@ -1475,8 +1523,8 @@ func NewContext() { | |||||
| Course.OrgName = sec.Key("org_name").MustString("") | Course.OrgName = sec.Key("org_name").MustString("") | ||||
| Course.TeamName = sec.Key("team_name").MustString("") | Course.TeamName = sec.Key("team_name").MustString("") | ||||
| GetGrampusConfig() | |||||
| getGrampusConfig() | |||||
| getModelartsCDConfig() | |||||
| getModelConvertConfig() | getModelConvertConfig() | ||||
| } | } | ||||
| @@ -1499,7 +1547,22 @@ func getModelConvertConfig() { | |||||
| ModelConvert.NPU_TENSORFLOW_IMAGE_ID = sec.Key("NPU_TENSORFLOW_IMAGE_ID").MustInt(35) | ModelConvert.NPU_TENSORFLOW_IMAGE_ID = sec.Key("NPU_TENSORFLOW_IMAGE_ID").MustInt(35) | ||||
| } | } | ||||
| func GetGrampusConfig() { | |||||
| func getModelartsCDConfig() { | |||||
| sec := Cfg.Section("modelarts-cd") | |||||
| ModelartsCD.Enabled = sec.Key("ENABLED").MustBool(false) | |||||
| ModelartsCD.EndPoint = sec.Key("ENDPOINT").MustString("https://modelarts.cn-southwest-228.cdzs.cn") | |||||
| ModelartsCD.ProjectID = sec.Key("PROJECT_ID").MustString("") | |||||
| ModelartsCD.AccessKey = sec.Key("ACCESS_KEY").MustString("") | |||||
| ModelartsCD.SecretKey = sec.Key("SECRET_KEY").MustString("") | |||||
| ModelartsCD.ImageInfos = sec.Key("IMAGE_INFOS").MustString("") | |||||
| ModelartsCD.FlavorInfos = sec.Key("FLAVOR_INFOS").MustString("") | |||||
| getNotebookImageInfos() | |||||
| getNotebookFlavorInfos() | |||||
| } | |||||
| func getGrampusConfig() { | |||||
| sec := Cfg.Section("grampus") | sec := Cfg.Section("grampus") | ||||
| Grampus.Env = sec.Key("ENV").MustString("TEST") | Grampus.Env = sec.Key("ENV").MustString("TEST") | ||||
| @@ -1633,6 +1696,26 @@ func ensureLFSDirectory() { | |||||
| } | } | ||||
| } | } | ||||
| func getNotebookImageInfos() { | |||||
| if StImageInfos == nil { | |||||
| if ModelartsCD.Enabled { | |||||
| json.Unmarshal([]byte(ModelartsCD.ImageInfos), &StImageInfos) | |||||
| } else { | |||||
| json.Unmarshal([]byte(ImageInfos), &StImageInfos) | |||||
| } | |||||
| } | |||||
| } | |||||
| func getNotebookFlavorInfos() { | |||||
| if StFlavorInfo == nil { | |||||
| if ModelartsCD.Enabled { | |||||
| json.Unmarshal([]byte(ModelartsCD.FlavorInfos), &StFlavorInfo) | |||||
| } else { | |||||
| json.Unmarshal([]byte(FlavorInfos), &StFlavorInfo) | |||||
| } | |||||
| } | |||||
| } | |||||
| // NewServices initializes the services | // NewServices initializes the services | ||||
| func NewServices() { | func NewServices() { | ||||
| InitDBConfig() | InitDBConfig() | ||||
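getNotebookImageInfos and getNotebookFlavorInfos unmarshal the configured strings into StImageInfosModelArts and StFlavorInfos, so IMAGE_INFOS and FLAVOR_INFOS are expected to hold JSON matching those struct tags. Illustrative values (the IDs, flavor names and descriptions are made up):

```go
// Example of the JSON shape implied by the struct tags of StFlavorInfos and
// StImageInfosModelArts; the concrete values are invented for illustration.
const exampleFlavorInfos = `{"flavor_info":[
  {"id": 1, "value": "modelarts.example.flavor", "desc": "1*NPU, 24 vCPU, 96GB"}
]}`

const exampleImageInfos = `{"image_info":[
  {"id": "0a1b2c3d", "value": "example-notebook-image", "desc": "example notebook image"}
]}`
```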
| @@ -791,7 +791,7 @@ func GetRefName(ref string) string { | |||||
| return reg.ReplaceAllString(ref, "") | return reg.ReplaceAllString(ref, "") | ||||
| } | } | ||||
| func MB2GB(size int64) string { | |||||
| func MB2GB(size int) string { | |||||
| s := strconv.FormatFloat(float64(size)/float64(1024), 'f', 2, 64) | s := strconv.FormatFloat(float64(size)/float64(1024), 'f', 2, 64) | ||||
| for strings.HasSuffix(s, "0") { | for strings.HasSuffix(s, "0") { | ||||
| s = strings.TrimSuffix(s, "0") | s = strings.TrimSuffix(s, "0") | ||||
| @@ -0,0 +1,10 @@ | |||||
| package util | |||||
| import ( | |||||
| gouuid "github.com/satori/go.uuid" | |||||
| "strings" | |||||
| ) | |||||
| func UUID() string { | |||||
| return strings.ReplaceAll(gouuid.NewV4().String(), "-", "") | |||||
| } | |||||
| @@ -23,6 +23,7 @@ signed_in_as = Signed in as | |||||
| enable_javascript = This website works better with JavaScript. | enable_javascript = This website works better with JavaScript. | ||||
| toc = Table of Contents | toc = Table of Contents | ||||
| return=Back OpenI | return=Back OpenI | ||||
| calculation_points = Calculation Points | |||||
| username = Username | username = Username | ||||
| email = Email Address | email = Email Address | ||||
| @@ -1059,7 +1060,7 @@ image_delete_fail=Failed to delete image, please try again later. | |||||
| image_overwrite=You had submitted the same name image before, are you sure to overwrite the original image? | image_overwrite=You had submitted the same name image before, are you sure to overwrite the original image? | ||||
| download=Download | download=Download | ||||
| score=Score | score=Score | ||||
| wait_count_start = There are currently | |||||
| wait_count_start = There are currently | |||||
| wait_count_end = tasks queued | wait_count_end = tasks queued | ||||
| file_limit_100 = Display up to 100 files or folders in a single directory | file_limit_100 = Display up to 100 files or folders in a single directory | ||||
| images.name = Image Tag | images.name = Image Tag | ||||
| @@ -1083,6 +1084,7 @@ balance.total_view = Total Balance | |||||
| balance.available = Available Balance: | balance.available = Available Balance: | ||||
| cloudbrain1 = cloudbrain1 | cloudbrain1 = cloudbrain1 | ||||
| cloudbrain2 = cloudbrain2 | cloudbrain2 = cloudbrain2 | ||||
| cdCenter = cd_ai_center | |||||
| cloudbrain_selection = select cloudbrain | cloudbrain_selection = select cloudbrain | ||||
| cloudbrain_platform_selection = Select the cloudbrain platform you want to use: | cloudbrain_platform_selection = Select the cloudbrain platform you want to use: | ||||
| confirm_choice = Confirm | confirm_choice = Confirm | ||||
| @@ -1269,7 +1271,7 @@ model.manage.modellabel=Model label | |||||
| model.manage.modeldesc=Model description | model.manage.modeldesc=Model description | ||||
| model.manage.baseinfo=Base Information | model.manage.baseinfo=Base Information | ||||
| modelconvert.notcreate=No model conversion task has been created. | modelconvert.notcreate=No model conversion task has been created. | ||||
| modelconvert.importfirst1=Please import the | |||||
| modelconvert.importfirst1=Please import the | |||||
| modelconvert.importfirst2=model | modelconvert.importfirst2=model | ||||
| modelconvert.importfirst3=first, then converts it. | modelconvert.importfirst3=first, then converts it. | ||||
| modelconvert.download=Download | modelconvert.download=Download | ||||
| @@ -3210,6 +3212,9 @@ gpu_num = GPU | |||||
| cpu_num = CPU | cpu_num = CPU | ||||
| memory = Memory | memory = Memory | ||||
| shared_memory = Shared Memory | shared_memory = Shared Memory | ||||
| gpu_memory = GPU Memory | |||||
| free = Free | |||||
| point_hr = Point/hr | |||||
| DEBUG = DEBUG | DEBUG = DEBUG | ||||
| @@ -3229,4 +3234,14 @@ load_code_failed=Fail to load code, please check if the right branch is selected | |||||
| error.dataset_select = dataset select error:the count exceed the limit or has same name | error.dataset_select = dataset select error:the count exceed the limit or has same name | ||||
| new_train_gpu_tooltips = The code is storaged in <strong style="color:#010101">%s</strong>, the dataset is storaged in <strong style="color:#010101">%s</strong>, and please put your model into <strong style="color:#010101">%s</strong> then you can download it online | new_train_gpu_tooltips = The code is storaged in <strong style="color:#010101">%s</strong>, the dataset is storaged in <strong style="color:#010101">%s</strong>, and please put your model into <strong style="color:#010101">%s</strong> then you can download it online | ||||
| new_infer_gpu_tooltips = The dataset is stored in <strong style="color:#010101">%s</strong>, the model file is stored in <strong style="color:#010101">%s</strong>, please store the inference output in <strong style="color:#010101">%s</strong> for subsequent downloads. | |||||
| new_infer_gpu_tooltips = The dataset is stored in <strong style="color:#010101">%s</strong>, the model file is stored in <strong style="color:#010101">%s</strong>, please store the inference output in <strong style="color:#010101">%s</strong> for subsequent downloads. | |||||
| [points] | |||||
| points = points | |||||
| free = Free | |||||
| points_hour = Points/hour | |||||
| balance_of_points = Balance of Points: | |||||
| hours = Hours | |||||
| expected_time = , expected to be available for | |||||
| points_acquisition_instructions = Points Acquisition Instructions | |||||
| insufficient_points_balance = Insufficient points balance | |||||
| @@ -23,6 +23,7 @@ signed_in_as=已登录用户 | |||||
| enable_javascript=使用 JavaScript能使本网站更好的工作。 | enable_javascript=使用 JavaScript能使本网站更好的工作。 | ||||
| toc=目录 | toc=目录 | ||||
| return=返回OpenI | return=返回OpenI | ||||
| calculation_points=算力积分 | |||||
| username=用户名 | username=用户名 | ||||
| email=电子邮件地址 | email=电子邮件地址 | ||||
| @@ -1084,6 +1085,7 @@ balance.total_view=余额总览 | |||||
| balance.available=可用余额: | balance.available=可用余额: | ||||
| cloudbrain1=云脑1 | cloudbrain1=云脑1 | ||||
| cloudbrain2=云脑2 | cloudbrain2=云脑2 | ||||
| cdCenter=成都智算中心 | |||||
| intelligent_net=智算网络 | intelligent_net=智算网络 | ||||
| cloudbrain_selection=云脑选择 | cloudbrain_selection=云脑选择 | ||||
| cloudbrain_platform_selection=选择您准备使用的云脑平台: | cloudbrain_platform_selection=选择您准备使用的云脑平台: | ||||
| @@ -3229,6 +3231,9 @@ gpu_num = GPU数 | |||||
| cpu_num = CPU数 | cpu_num = CPU数 | ||||
| memory = 内存 | memory = 内存 | ||||
| shared_memory = 共享内存 | shared_memory = 共享内存 | ||||
| gpu_memory = 显存 | |||||
| free = 免费 | |||||
| point_hr = 积分/时 | |||||
| DEBUG = 调试任务 | DEBUG = 调试任务 | ||||
| SNN4IMAGENET = 评测任务 | SNN4IMAGENET = 评测任务 | ||||
| @@ -3247,5 +3252,16 @@ load_code_failed=代码加载失败,请确认选择了正确的分支。 | |||||
| error.dataset_select = 数据集选择错误:数量超过限制或者有同名数据集 | error.dataset_select = 数据集选择错误:数量超过限制或者有同名数据集 | ||||
| [points] | |||||
| points = 积分 | |||||
| free = 免费 | |||||
| points_hour = 积分/每小时 | |||||
| balance_of_points = 积分余额: | |||||
| hours = 小时 | |||||
| expected_time = ,预计可用 | |||||
| points_acquisition_instructions = 积分获取说明 | |||||
| insufficient_points_balance = 积分余额不足 | |||||
| new_train_gpu_tooltips =训练脚本存储在<strong style="color:#010101">%s</strong>中,数据集存储在<strong style="color:#010101">%s</strong>中,训练输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。 | new_train_gpu_tooltips =训练脚本存储在<strong style="color:#010101">%s</strong>中,数据集存储在<strong style="color:#010101">%s</strong>中,训练输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。 | ||||
| new_infer_gpu_tooltips = 数据集存储在<strong style="color:#010101">%s</strong>中,模型文件存储在<strong style="color:#010101">%s</strong>中,推理输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。 | |||||
| new_infer_gpu_tooltips = 数据集存储在<strong style="color:#010101">%s</strong>中,模型文件存储在<strong style="color:#010101">%s</strong>中,推理输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。 | |||||
| @@ -80,4 +80,4 @@ | |||||
| "browserslist": [ | "browserslist": [ | ||||
| "defaults" | "defaults" | ||||
| ] | ] | ||||
| } | |||||
| } | |||||
| @@ -92,13 +92,13 @@ func CloudBrains(ctx *context.Context) { | |||||
| return | return | ||||
| } | } | ||||
| models.LoadSpecs4CloudbrainInfo(ciTasks) | |||||
| for i, task := range ciTasks { | for i, task := range ciTasks { | ||||
| ciTasks[i].CanDebug = true | ciTasks[i].CanDebug = true | ||||
| ciTasks[i].CanDel = true | ciTasks[i].CanDel = true | ||||
| ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource | ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource | ||||
| ciTasks[i].Cloudbrain.AiCenter = repo.GetCloudbrainAiCenter(task.Cloudbrain, ctx) | ciTasks[i].Cloudbrain.AiCenter = repo.GetCloudbrainAiCenter(task.Cloudbrain, ctx) | ||||
| _, cardType, _ := repo.GetCloudbrainCardNumAndType(task.Cloudbrain) | |||||
| ciTasks[i].Cloudbrain.CardType = cardType | |||||
| ciTasks[i].Cloudbrain.Cluster = repo.GetCloudbrainCluster(task.Cloudbrain, ctx) | ciTasks[i].Cloudbrain.Cluster = repo.GetCloudbrainCluster(task.Cloudbrain, ctx) | ||||
| } | } | ||||
| @@ -1,6 +1,7 @@ | |||||
| package admin | package admin | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/notification" | |||||
| "net/http" | "net/http" | ||||
| "strconv" | "strconv" | ||||
| "strings" | "strings" | ||||
| @@ -111,6 +112,8 @@ func DatasetAction(ctx *context.Context) { | |||||
| if err != nil { | if err != nil { | ||||
| ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action")))) | ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action")))) | ||||
| } else { | } else { | ||||
| d, _ := models.GetDatasetByID(datasetId) | |||||
| notification.NotifyDatasetRecommend(ctx.User, d, ctx.Params(":action")) | |||||
| ctx.JSON(http.StatusOK, models.BaseOKMessage) | ctx.JSON(http.StatusOK, models.BaseOKMessage) | ||||
| } | } | ||||
| } | } | ||||
| @@ -8,6 +8,8 @@ import ( | |||||
| "code.gitea.io/gitea/routers/response" | "code.gitea.io/gitea/routers/response" | ||||
| "code.gitea.io/gitea/services/cloudbrain/resource" | "code.gitea.io/gitea/services/cloudbrain/resource" | ||||
| "net/http" | "net/http" | ||||
| "strconv" | |||||
| "strings" | |||||
| ) | ) | ||||
| const ( | const ( | ||||
| @@ -118,11 +120,13 @@ func GetResourceSpecificationList(ctx *context.Context) { | |||||
| queue := ctx.QueryInt64("queue") | queue := ctx.QueryInt64("queue") | ||||
| status := ctx.QueryInt("status") | status := ctx.QueryInt("status") | ||||
| cluster := ctx.Query("cluster") | cluster := ctx.Query("cluster") | ||||
| available := ctx.QueryInt("available") | |||||
| list, err := resource.GetResourceSpecificationList(models.SearchResourceSpecificationOptions{ | list, err := resource.GetResourceSpecificationList(models.SearchResourceSpecificationOptions{ | ||||
| ListOptions: models.ListOptions{Page: page, PageSize: 10}, | |||||
| QueueId: queue, | |||||
| Status: status, | |||||
| Cluster: cluster, | |||||
| ListOptions: models.ListOptions{Page: page, PageSize: 10}, | |||||
| QueueId: queue, | |||||
| Status: status, | |||||
| Cluster: cluster, | |||||
| AvailableCode: available, | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| log.Error("GetResourceSpecificationList error.%v", err) | log.Error("GetResourceSpecificationList error.%v", err) | ||||
| @@ -246,3 +250,37 @@ func UpdateResourceScene(ctx *context.Context, req models.ResourceSceneReq) { | |||||
| } | } | ||||
| ctx.JSON(http.StatusOK, response.Success()) | ctx.JSON(http.StatusOK, response.Success()) | ||||
| } | } | ||||
| func RefreshHistorySpec(ctx *context.Context) { | |||||
| scope := ctx.Query("scope") | |||||
| list := ctx.Query("list") | |||||
| scopeAll := scope == "all" | |||||
| var ids = make([]int64, 0) | |||||
| if list != "" { | |||||
| strs := strings.Split(list, "|") | |||||
| for _, s := range strs { | |||||
| i, err := strconv.ParseInt(s, 10, 64) | |||||
| if err != nil { | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ids = append(ids, i) | |||||
| } | |||||
| } | |||||
| total, success, err := resource.RefreshHistorySpec(scopeAll, ids) | |||||
| if err != nil { | |||||
| log.Error("RefreshHistorySpec error. %v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| r := make(map[string]interface{}, 0) | |||||
| r["success"] = success | |||||
| r["total"] = total | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(r)) | |||||
| } | |||||
| @@ -31,6 +31,7 @@ func GetQRCode4Bind(ctx *context.Context) { | |||||
| r, err := createQRCode4Bind(userId) | r, err := createQRCode4Bind(userId) | ||||
| if err != nil { | if err != nil { | ||||
| log.Error("GetQRCode4Bind failed,error=%v", err) | |||||
| ctx.JSON(200, map[string]interface{}{ | ctx.JSON(200, map[string]interface{}{ | ||||
| "code": "9999", | "code": "9999", | ||||
| "msg": "Get QR code failed", | "msg": "Get QR code failed", | ||||
| @@ -1,9 +1,9 @@ | |||||
| package authentication | package authentication | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/auth/wechat" | |||||
| "code.gitea.io/gitea/modules/context" | "code.gitea.io/gitea/modules/context" | ||||
| "code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
| wechat "code.gitea.io/gitea/services/wechat" | |||||
| "encoding/xml" | "encoding/xml" | ||||
| "io/ioutil" | "io/ioutil" | ||||
| "time" | "time" | ||||
| @@ -1,6 +1,7 @@ | |||||
| package image | package image | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/notification" | |||||
| "net/http" | "net/http" | ||||
| "strconv" | "strconv" | ||||
| @@ -25,6 +26,10 @@ func Action(ctx *context.Context) { | |||||
| if err != nil { | if err != nil { | ||||
| ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action")))) | ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action")))) | ||||
| } else { | } else { | ||||
| image, err := models.GetImageByID(imageId) | |||||
| if err == nil { | |||||
| notification.NotifyImageRecommend(ctx.User, image, ctx.Params(":action")) | |||||
| } | |||||
| ctx.JSON(http.StatusOK, models.BaseOKMessage) | ctx.JSON(http.StatusOK, models.BaseOKMessage) | ||||
| } | } | ||||
| } | } | ||||
| @@ -6,6 +6,7 @@ | |||||
| package private | package private | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/routers/admin" | |||||
| "strings" | "strings" | ||||
| "code.gitea.io/gitea/routers/repo" | "code.gitea.io/gitea/routers/repo" | ||||
| @@ -51,6 +52,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/tool/org_stat", OrgStatisticManually) | m.Get("/tool/org_stat", OrgStatisticManually) | ||||
| m.Post("/tool/update_repo_visit/:date", UpdateRepoVisit) | m.Post("/tool/update_repo_visit/:date", UpdateRepoVisit) | ||||
| m.Post("/task/history_handle/duration", repo.HandleTaskWithNoDuration) | m.Post("/task/history_handle/duration", repo.HandleTaskWithNoDuration) | ||||
| m.Post("/resources/specification/handle_historical_task", admin.RefreshHistorySpec) | |||||
| }, CheckInternalToken) | }, CheckInternalToken) | ||||
| } | } | ||||
| @@ -2,6 +2,8 @@ package repo | |||||
| import ( | import ( | ||||
| "bufio" | "bufio" | ||||
| "code.gitea.io/gitea/services/cloudbrain/resource" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -121,86 +123,7 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error { | |||||
| ctx.Data["QueuesDetail"] = queuesDetail | ctx.Data["QueuesDetail"] = queuesDetail | ||||
| } | } | ||||
| cloudbrain.InitSpecialPool() | |||||
| if gpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) | |||||
| } | |||||
| ctx.Data["gpu_types"] = gpuInfos.GpuInfo | |||||
| if trainGpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.TrainGpuTypes), &trainGpuInfos) | |||||
| } | |||||
| ctx.Data["train_gpu_types"] = trainGpuInfos.GpuInfo | |||||
| if inferenceGpuInfos == nil && setting.InferenceGpuTypes != "" { | |||||
| json.Unmarshal([]byte(setting.InferenceGpuTypes), &inferenceGpuInfos) | |||||
| } | |||||
| if inferenceGpuInfos != nil { | |||||
| ctx.Data["inference_gpu_types"] = inferenceGpuInfos.GpuInfo | |||||
| } | |||||
| if benchmarkGpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.BenchmarkGpuTypes), &benchmarkGpuInfos) | |||||
| } | |||||
| ctx.Data["benchmark_gpu_types"] = benchmarkGpuInfos.GpuInfo | |||||
| if benchmarkResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.BenchmarkResourceSpecs), &benchmarkResourceSpecs) | |||||
| } | |||||
| ctx.Data["benchmark_resource_specs"] = benchmarkResourceSpecs.ResourceSpec | |||||
| if cloudbrain.ResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs) | |||||
| } | |||||
| ctx.Data["resource_specs"] = cloudbrain.ResourceSpecs.ResourceSpec | |||||
| if cloudbrain.TrainResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) | |||||
| } | |||||
| ctx.Data["train_resource_specs"] = cloudbrain.TrainResourceSpecs.ResourceSpec | |||||
| if cloudbrain.InferenceResourceSpecs == nil && setting.InferenceResourceSpecs != "" { | |||||
| json.Unmarshal([]byte(setting.InferenceResourceSpecs), &cloudbrain.InferenceResourceSpecs) | |||||
| } | |||||
| if cloudbrain.InferenceResourceSpecs != nil { | |||||
| ctx.Data["inference_resource_specs"] = cloudbrain.InferenceResourceSpecs.ResourceSpec | |||||
| } | |||||
| if cloudbrain.SpecialPools != nil { | |||||
| var debugGpuTypes []*models.GpuInfo | |||||
| var trainGpuTypes []*models.GpuInfo | |||||
| for _, pool := range cloudbrain.SpecialPools.Pools { | |||||
| isOrgMember, _ := models.IsOrganizationMemberByOrgName(pool.Org, ctx.User.ID) | |||||
| if isOrgMember { | |||||
| for _, jobType := range pool.JobType { | |||||
| if jobType == string(models.JobTypeDebug) { | |||||
| debugGpuTypes = append(debugGpuTypes, pool.Pool...) | |||||
| if pool.ResourceSpec != nil { | |||||
| ctx.Data["resource_specs"] = pool.ResourceSpec | |||||
| } | |||||
| } else if jobType == string(models.JobTypeTrain) { | |||||
| trainGpuTypes = append(trainGpuTypes, pool.Pool...) | |||||
| if pool.ResourceSpec != nil { | |||||
| ctx.Data["train_resource_specs"] = pool.ResourceSpec | |||||
| } | |||||
| } | |||||
| } | |||||
| break | |||||
| } | |||||
| } | |||||
| if len(debugGpuTypes) > 0 { | |||||
| ctx.Data["gpu_types"] = debugGpuTypes | |||||
| } | |||||
| if len(trainGpuTypes) > 0 { | |||||
| ctx.Data["train_gpu_types"] = trainGpuTypes | |||||
| } | |||||
| } | |||||
| prepareCloudbrainOneSpecs(ctx) | |||||
| ctx.Data["params"] = "" | ctx.Data["params"] = "" | ||||
| ctx.Data["branchName"] = ctx.Repo.BranchName | ctx.Data["branchName"] = ctx.Repo.BranchName | ||||
| @@ -218,6 +141,40 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error { | |||||
| return nil | return nil | ||||
| } | } | ||||
| func prepareCloudbrainOneSpecs(ctx *context.Context) { | |||||
| debugSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeDebug, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne, | |||||
| }) | |||||
| ctx.Data["debug_specs"] = debugSpecs | |||||
| trainSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne, | |||||
| }) | |||||
| ctx.Data["train_specs"] = trainSpecs | |||||
| inferenceSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeInference, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne, | |||||
| }) | |||||
| ctx.Data["inference_specs"] = inferenceSpecs | |||||
| benchmarkSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeBenchmark, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne, | |||||
| }) | |||||
| ctx.Data["benchmark_specs"] = benchmarkSpecs | |||||
| } | |||||
| func CloudBrainNew(ctx *context.Context) { | func CloudBrainNew(ctx *context.Context) { | ||||
| err := cloudBrainNewDataPrepare(ctx) | err := cloudBrainNewDataPrepare(ctx) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -235,9 +192,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| image := strings.TrimSpace(form.Image) | image := strings.TrimSpace(form.Image) | ||||
| uuids := form.Attachment | uuids := form.Attachment | ||||
| jobType := form.JobType | jobType := form.JobType | ||||
| gpuQueue := form.GpuType | |||||
| codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | ||||
| resourceSpecId := form.ResourceSpecId | |||||
| branchName := form.BranchName | branchName := form.BranchName | ||||
| bootFile := strings.TrimSpace(form.BootFile) | bootFile := strings.TrimSpace(form.BootFile) | ||||
| repo := ctx.Repo.Repository | repo := ctx.Repo.Repository | ||||
| @@ -325,18 +280,10 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| command = commandTrain | command = commandTrain | ||||
| } | } | ||||
| errStr := checkCloudBrainSpecialPool(ctx, jobType, gpuQueue, resourceSpecId) | |||||
| if errStr != "" { | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr(errStr, tpl, &form) | |||||
| return | |||||
| } | |||||
| if branchName == "" { | if branchName == "" { | ||||
| branchName = cloudbrain.DefaultBranchName | branchName = cloudbrain.DefaultBranchName | ||||
| } | } | ||||
| errStr = loadCodeAndMakeModelPath(repo, codePath, branchName, jobName, cloudbrain.ModelMountPath) | |||||
| errStr := loadCodeAndMakeModelPath(repo, codePath, branchName, jobName, cloudbrain.ModelMountPath) | |||||
| if errStr != "" { | if errStr != "" { | ||||
| cloudBrainNewDataPrepare(ctx) | cloudBrainNewDataPrepare(ctx) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tpl, &form) | ctx.RenderWithErr(ctx.Tr(errStr), tpl, &form) | ||||
| @@ -345,6 +292,24 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| commitID, _ := ctx.Repo.GitRepo.GetBranchCommitID(branchName) | commitID, _ := ctx.Repo.GitRepo.GetBranchCommitID(branchName) | ||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobType(jobType), | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne}) | |||||
| if err != nil || spec == nil { | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr("Resource specification not available", tpl, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form) | |||||
| return | |||||
| } | |||||
| req := cloudbrain.GenerateCloudBrainTaskReq{ | req := cloudbrain.GenerateCloudBrainTaskReq{ | ||||
| Ctx: ctx, | Ctx: ctx, | ||||
| DisplayJobName: displayJobName, | DisplayJobName: displayJobName, | ||||
| @@ -360,7 +325,6 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | ||||
| BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | ||||
| JobType: jobType, | JobType: jobType, | ||||
| GpuQueue: gpuQueue, | |||||
| Description: form.Description, | Description: form.Description, | ||||
| BranchName: branchName, | BranchName: branchName, | ||||
| BootFile: form.BootFile, | BootFile: form.BootFile, | ||||
| @@ -368,8 +332,8 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| CommitID: commitID, | CommitID: commitID, | ||||
| BenchmarkTypeID: 0, | BenchmarkTypeID: 0, | ||||
| BenchmarkChildTypeID: 0, | BenchmarkChildTypeID: 0, | ||||
| ResourceSpecId: resourceSpecId, | |||||
| ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = cloudbrain.GenerateTask(req) | err = cloudbrain.GenerateTask(req) | ||||
| @@ -417,9 +381,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra | |||||
| image := strings.TrimSpace(form.Image) | image := strings.TrimSpace(form.Image) | ||||
| uuid := form.Attachment | uuid := form.Attachment | ||||
| jobType := string(models.JobTypeInference) | jobType := string(models.JobTypeInference) | ||||
| gpuQueue := form.GpuType | |||||
| codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | ||||
| resourceSpecId := form.ResourceSpecId | |||||
| branchName := form.BranchName | branchName := form.BranchName | ||||
| bootFile := strings.TrimSpace(form.BootFile) | bootFile := strings.TrimSpace(form.BootFile) | ||||
| labelName := form.LabelName | labelName := form.LabelName | ||||
| @@ -501,7 +463,22 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra | |||||
| ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form) | ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form) | ||||
| return | return | ||||
| } | } | ||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeInference, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne}) | |||||
| if err != nil || spec == nil { | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr("Resource specification not available", tpl, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form) | |||||
| return | |||||
| } | |||||
| req := cloudbrain.GenerateCloudBrainTaskReq{ | req := cloudbrain.GenerateCloudBrainTaskReq{ | ||||
| Ctx: ctx, | Ctx: ctx, | ||||
| DisplayJobName: displayJobName, | DisplayJobName: displayJobName, | ||||
| @@ -517,19 +494,18 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra | |||||
| Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | ||||
| BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | ||||
| JobType: jobType, | JobType: jobType, | ||||
| GpuQueue: gpuQueue, | |||||
| Description: form.Description, | Description: form.Description, | ||||
| BranchName: branchName, | BranchName: branchName, | ||||
| BootFile: form.BootFile, | BootFile: form.BootFile, | ||||
| Params: form.Params, | Params: form.Params, | ||||
| CommitID: commitID, | CommitID: commitID, | ||||
| ResourceSpecId: resourceSpecId, | |||||
| ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ||||
| ModelName: form.ModelName, | ModelName: form.ModelName, | ||||
| ModelVersion: form.ModelVersion, | ModelVersion: form.ModelVersion, | ||||
| CkptName: form.CkptName, | CkptName: form.CkptName, | ||||
| TrainUrl: form.TrainUrl, | TrainUrl: form.TrainUrl, | ||||
| LabelName: labelName, | LabelName: labelName, | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = cloudbrain.GenerateTask(req) | err = cloudbrain.GenerateTask(req) | ||||
| @@ -607,32 +583,30 @@ func CloudBrainRestart(ctx *context.Context) { | |||||
| break | break | ||||
| } | } | ||||
| var hasSameResource bool | |||||
| if gpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) | |||||
| } | |||||
| for _, resourceType := range gpuInfos.GpuInfo { | |||||
| if resourceType.Queue == task.GpuQueue { | |||||
| hasSameResource = true | |||||
| break | |||||
| } | |||||
| specOld, err := resource.GetCloudbrainSpec(task.ID) | |||||
| if err != nil || specOld == nil { | |||||
| log.Error("CloudBrainRestart GetCloudbrainSpec error.task.id = %d", task.ID) | |||||
| resultCode = "-1" | |||||
| errorMsg = "Resource specification not support any more" | |||||
| break | |||||
| } | } | ||||
| if !hasSameResource && cloudbrain.SpecialPools != nil { | |||||
| for _, specialPool := range cloudbrain.SpecialPools.Pools { | |||||
| cloudbrain.IsElementExist(specialPool.JobType, string(models.JobTypeDebug)) | |||||
| for _, pool := range specialPool.Pool { | |||||
| if pool.Queue == task.GpuQueue { | |||||
| hasSameResource = true | |||||
| } | |||||
| } | |||||
| } | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, specOld.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobType(task.JobType), | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne}) | |||||
| if err != nil || spec == nil { | |||||
| log.Error("CloudBrainRestart GetAndCheckSpec error.task.id = %d", task.ID) | |||||
| resultCode = "-1" | |||||
| errorMsg = "Resource specification not support any more" | |||||
| break | |||||
| } | } | ||||
| task.Spec = spec | |||||
| if !hasSameResource { | |||||
| log.Error("has no same resource, can not restart", ctx.Data["MsgID"]) | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| resultCode = "-1" | resultCode = "-1" | ||||
| errorMsg = "the job's version is too old and can not be restarted" | |||||
| errorMsg = ctx.Tr("points.insufficient_points_balance") | |||||
| break | break | ||||
| } | } | ||||
| @@ -707,128 +681,13 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo | |||||
| ctx.NotFound(ctx.Req.URL.RequestURI(), nil) | ctx.NotFound(ctx.Req.URL.RequestURI(), nil) | ||||
| return | return | ||||
| } | } | ||||
| hasSpec := false | |||||
| if task.JobType == string(models.JobTypeTrain) { | |||||
| if cloudbrain.TrainResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.TrainResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| ctx.Data["GpuNum"] = tmp.GpuNum | |||||
| ctx.Data["CpuNum"] = tmp.CpuNum | |||||
| ctx.Data["MemMiB"] = tmp.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } else if task.JobType == string(models.JobTypeInference) { | |||||
| if cloudbrain.InferenceResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.InferenceResourceSpecs), &cloudbrain.InferenceResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.InferenceResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| ctx.Data["GpuNum"] = tmp.GpuNum | |||||
| ctx.Data["CpuNum"] = tmp.CpuNum | |||||
| ctx.Data["MemMiB"] = tmp.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } else { | |||||
| if cloudbrain.ResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| ctx.Data["GpuNum"] = tmp.GpuNum | |||||
| ctx.Data["CpuNum"] = tmp.CpuNum | |||||
| ctx.Data["MemMiB"] = tmp.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| if !hasSpec && cloudbrain.SpecialPools != nil { | |||||
| for _, specialPool := range cloudbrain.SpecialPools.Pools { | |||||
| if specialPool.ResourceSpec != nil { | |||||
| for _, spec := range specialPool.ResourceSpec { | |||||
| if task.ResourceSpecId == spec.Id { | |||||
| ctx.Data["GpuNum"] = spec.GpuNum | |||||
| ctx.Data["CpuNum"] = spec.CpuNum | |||||
| ctx.Data["MemMiB"] = spec.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = spec.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| prepareSpec4Show(ctx, task) | |||||
| if ctx.Written() { | |||||
| return | |||||
| } | } | ||||
| if result != nil { | if result != nil { | ||||
| jobRes, _ := models.ConvertToJobResultPayload(result.Payload) | jobRes, _ := models.ConvertToJobResultPayload(result.Payload) | ||||
| jobRes.Resource.Memory = strings.ReplaceAll(jobRes.Resource.Memory, "Mi", "MB") | |||||
| spec := "GPU数:" + strconv.Itoa(jobRes.Resource.NvidiaComGpu) + ",CPU数:" + strconv.Itoa(jobRes.Resource.CPU) + ",内存(MB):" + jobRes.Resource.Memory | |||||
| ctx.Data["resource_spec"] = spec | |||||
| if task.JobType == string(models.JobTypeTrain) { | |||||
| if trainGpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.TrainGpuTypes), &trainGpuInfos) | |||||
| } | |||||
| for _, resourceType := range trainGpuInfos.GpuInfo { | |||||
| if resourceType.Queue == jobRes.Config.GpuType { | |||||
| ctx.Data["resource_type"] = resourceType.Value | |||||
| } | |||||
| } | |||||
| } else if task.JobType == string(models.JobTypeInference) { | |||||
| if inferenceGpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.InferenceGpuTypes), &inferenceGpuInfos) | |||||
| } | |||||
| for _, resourceType := range inferenceGpuInfos.GpuInfo { | |||||
| if resourceType.Queue == jobRes.Config.GpuType { | |||||
| ctx.Data["resource_type"] = resourceType.Value | |||||
| } | |||||
| } | |||||
| } else if cloudbrain.IsBenchmarkJob(task.JobType) { | |||||
| if benchmarkGpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.BenchmarkGpuTypes), &benchmarkGpuInfos) | |||||
| } | |||||
| for _, resourceType := range benchmarkGpuInfos.GpuInfo { | |||||
| if resourceType.Queue == jobRes.Config.GpuType { | |||||
| ctx.Data["resource_type"] = resourceType.Value | |||||
| } | |||||
| } | |||||
| } else { | |||||
| if gpuInfos == nil { | |||||
| json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) | |||||
| } | |||||
| for _, resourceType := range gpuInfos.GpuInfo { | |||||
| if resourceType.Queue == jobRes.Config.GpuType { | |||||
| ctx.Data["resource_type"] = resourceType.Value | |||||
| } | |||||
| } | |||||
| } | |||||
| if cloudbrain.SpecialPools != nil { | |||||
| for _, specialPool := range cloudbrain.SpecialPools.Pools { | |||||
| for _, resourceType := range specialPool.Pool { | |||||
| if resourceType.Queue == jobRes.Config.GpuType { | |||||
| ctx.Data["resource_type"] = resourceType.Value | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| taskRoles := jobRes.TaskRoles | taskRoles := jobRes.TaskRoles | ||||
| taskRes, _ := models.ConvertToTaskPod(taskRoles[cloudbrain.SubTaskName].(map[string]interface{})) | taskRes, _ := models.ConvertToTaskPod(taskRoles[cloudbrain.SubTaskName].(map[string]interface{})) | ||||
| ctx.Data["taskRes"] = taskRes | ctx.Data["taskRes"] = taskRes | ||||
| @@ -952,6 +811,85 @@ func CloudBrainDebug(ctx *context.Context) { | |||||
| ctx.Redirect(debugUrl) | ctx.Redirect(debugUrl) | ||||
| } | } | ||||
| func prepareSpec4Show(ctx *context.Context, task *models.Cloudbrain) { | |||||
| s, err := resource.GetCloudbrainSpec(task.ID) | |||||
| if err != nil { | |||||
| log.Info("error:" + err.Error()) | |||||
| ctx.NotFound(ctx.Req.URL.RequestURI(), nil) | |||||
| return | |||||
| } | |||||
| ctx.Data["Spec"] = s | |||||
| } | |||||
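prepareSpec4Show replaces the per-job-type spec lookups retained below in oldPrepareSpec4Show: instead of copying four loose numbers into ctx.Data, the handler now resolves one specification snapshot for the task and hands a single object to the template. A minimal sketch of the before/after contract (the only assumption is that templates read the fields of the new "Spec" entry directly):

    // Before: templates depended on four separate ctx.Data keys.
    ctx.Data["GpuNum"] = tmp.GpuNum
    ctx.Data["CpuNum"] = tmp.CpuNum
    ctx.Data["MemMiB"] = tmp.MemMiB
    ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB

    // After: one snapshot resolved from the task ID.
    spec, err := resource.GetCloudbrainSpec(task.ID)
    if err == nil {
        ctx.Data["Spec"] = spec
    }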
| func oldPrepareSpec4Show(ctx *context.Context, task *models.Cloudbrain) { | |||||
| hasSpec := false | |||||
| if task.JobType == string(models.JobTypeTrain) { | |||||
| if cloudbrain.TrainResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.TrainResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| ctx.Data["GpuNum"] = tmp.GpuNum | |||||
| ctx.Data["CpuNum"] = tmp.CpuNum | |||||
| ctx.Data["MemMiB"] = tmp.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } else if task.JobType == string(models.JobTypeInference) { | |||||
| if cloudbrain.InferenceResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.InferenceResourceSpecs), &cloudbrain.InferenceResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.InferenceResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| ctx.Data["GpuNum"] = tmp.GpuNum | |||||
| ctx.Data["CpuNum"] = tmp.CpuNum | |||||
| ctx.Data["MemMiB"] = tmp.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } else { | |||||
| if cloudbrain.ResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| ctx.Data["GpuNum"] = tmp.GpuNum | |||||
| ctx.Data["CpuNum"] = tmp.CpuNum | |||||
| ctx.Data["MemMiB"] = tmp.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| if !hasSpec && cloudbrain.SpecialPools != nil { | |||||
| for _, specialPool := range cloudbrain.SpecialPools.Pools { | |||||
| if specialPool.ResourceSpec != nil { | |||||
| for _, spec := range specialPool.ResourceSpec { | |||||
| if task.ResourceSpecId == spec.Id { | |||||
| ctx.Data["GpuNum"] = spec.GpuNum | |||||
| ctx.Data["CpuNum"] = spec.CpuNum | |||||
| ctx.Data["MemMiB"] = spec.MemMiB | |||||
| ctx.Data["ShareMemMiB"] = spec.ShareMemMiB | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| func CloudBrainCommitImageShow(ctx *context.Context) { | func CloudBrainCommitImageShow(ctx *context.Context) { | ||||
| ctx.Data["PageIsCloudBrain"] = true | ctx.Data["PageIsCloudBrain"] = true | ||||
| ctx.Data["Type"] = ctx.Cloudbrain.Type | ctx.Data["Type"] = ctx.Cloudbrain.Type | ||||
| @@ -1088,7 +1026,7 @@ func CloudBrainAdminCommitImage(ctx *context.Context, form auth.CommitAdminImage | |||||
| UID: ctx.User.ID, | UID: ctx.User.ID, | ||||
| Type: models.GetRecommondType(form.IsRecommend), | Type: models.GetRecommondType(form.IsRecommend), | ||||
| Place: form.Place, | Place: form.Place, | ||||
| }) | |||||
| }, ctx.User) | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("CommitImagefailed") | log.Error("CommitImagefailed") | ||||
| if models.IsErrImageTagExist(err) { | if models.IsErrImageTagExist(err) { | ||||
| @@ -1135,7 +1073,7 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain | |||||
| CloudBrainType: form.Type, | CloudBrainType: form.Type, | ||||
| Topics: validTopics, | Topics: validTopics, | ||||
| UID: ctx.User.ID, | UID: ctx.User.ID, | ||||
| }) | |||||
| }, ctx.User) | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"]) | log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"]) | ||||
| if models.IsErrImageTagExist(err) { | if models.IsErrImageTagExist(err) { | ||||
| @@ -1149,7 +1087,6 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain | |||||
| return | return | ||||
| } | } | ||||
| ctx.JSON(200, models.BaseOKMessage) | ctx.JSON(200, models.BaseOKMessage) | ||||
| } | } | ||||
| @@ -1186,6 +1123,7 @@ func CloudBrainStop(ctx *context.Context) { | |||||
| log.Error("the job(%s) has been stopped", task.JobName, ctx.Data["msgID"]) | log.Error("the job(%s) has been stopped", task.JobName, ctx.Data["msgID"]) | ||||
| resultCode = "-1" | resultCode = "-1" | ||||
| errorMsg = "cloudbrain.Already_stopped" | errorMsg = "cloudbrain.Already_stopped" | ||||
| resultCode = task.Status | |||||
| break | break | ||||
| } | } | ||||
| @@ -1212,7 +1150,6 @@ func CloudBrainStop(ctx *context.Context) { | |||||
| errorMsg = "cloudbrain.Stopped_success_update_status_fail" | errorMsg = "cloudbrain.Stopped_success_update_status_fail" | ||||
| break | break | ||||
| } | } | ||||
| status = task.Status | status = task.Status | ||||
| break | break | ||||
| } | } | ||||
| @@ -1267,7 +1204,7 @@ func StopJobs(cloudBrains []*models.Cloudbrain) { | |||||
| }) | }) | ||||
| logErrorAndUpdateJobStatus(err, taskInfo) | logErrorAndUpdateJobStatus(err, taskInfo) | ||||
| } else { | |||||
| } else if taskInfo.Type == models.TypeCloudBrainTwo { | |||||
| if taskInfo.JobType == string(models.JobTypeTrain) { | if taskInfo.JobType == string(models.JobTypeTrain) { | ||||
| err := retry(3, time.Second*30, func() error { | err := retry(3, time.Second*30, func() error { | ||||
| _, err := modelarts.StopTrainJob(taskInfo.JobID, strconv.FormatInt(taskInfo.VersionID, 10)) | _, err := modelarts.StopTrainJob(taskInfo.JobID, strconv.FormatInt(taskInfo.VersionID, 10)) | ||||
| @@ -1284,8 +1221,16 @@ func StopJobs(cloudBrains []*models.Cloudbrain) { | |||||
| }) | }) | ||||
| logErrorAndUpdateJobStatus(err, taskInfo) | logErrorAndUpdateJobStatus(err, taskInfo) | ||||
| } | } | ||||
| } | |||||
| } else if taskInfo.Type == models.TypeC2Net { | |||||
| if taskInfo.JobType == string(models.JobTypeTrain) { | |||||
| err := retry(3, time.Second*30, func() error { | |||||
| _, err := grampus.StopJob(taskInfo.JobID) | |||||
| return err | |||||
| }) | |||||
| logErrorAndUpdateJobStatus(err, taskInfo) | |||||
| } | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
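StopJobs now routes TypeC2Net tasks through grampus.StopJob behind the same retry wrapper already used for the other clusters. The retry helper itself is not part of this diff; a self-contained sketch of what such a wrapper typically looks like (the real helper in this repository may differ in signature and backoff policy):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retry runs fn up to attempts times, sleeping between failed attempts.
    func retry(attempts int, sleep time.Duration, fn func() error) error {
        var err error
        for i := 0; i < attempts; i++ {
            if err = fn(); err == nil {
                return nil
            }
            if i < attempts-1 {
                time.Sleep(sleep)
            }
        }
        return fmt.Errorf("after %d attempts: %w", attempts, err)
    }

    func main() {
        calls := 0
        err := retry(3, 10*time.Millisecond, func() error {
            calls++
            if calls < 3 {
                return errors.New("transient failure")
            }
            return nil
        })
        fmt.Println(calls, err) // 3 <nil>
    }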
| @@ -2285,10 +2230,8 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo | |||||
| displayJobName := form.DisplayJobName | displayJobName := form.DisplayJobName | ||||
| jobName := util.ConvertDisplayJobNameToJobName(displayJobName) | jobName := util.ConvertDisplayJobNameToJobName(displayJobName) | ||||
| image := strings.TrimSpace(form.Image) | image := strings.TrimSpace(form.Image) | ||||
| gpuQueue := form.GpuType | |||||
| command := cloudbrain.CommandBenchmark | command := cloudbrain.CommandBenchmark | ||||
| codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | ||||
| resourceSpecId := cloudbrain.BenchMarkResourceID | |||||
| benchmarkTypeID := form.BenchmarkTypeID | benchmarkTypeID := form.BenchmarkTypeID | ||||
| benchmarkChildTypeID := form.BenchmarkChildTypeID | benchmarkChildTypeID := form.BenchmarkChildTypeID | ||||
| @@ -2329,19 +2272,20 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo | |||||
| return | return | ||||
| } | } | ||||
| _, err = getBenchmarkGpuQueue(gpuQueue) | |||||
| if err != nil { | |||||
| log.Error("getBenchmarkGpuQueue failed:%v", err, ctx.Data["MsgID"]) | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeBenchmark, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne}) | |||||
| if err != nil || spec == nil { | |||||
| cloudBrainNewDataPrepare(ctx) | cloudBrainNewDataPrepare(ctx) | ||||
| ctx.RenderWithErr("gpu queue error", tplCloudBrainBenchmarkNew, &form) | |||||
| ctx.RenderWithErr("Resource specification not available", tplCloudBrainBenchmarkNew, &form) | |||||
| return | return | ||||
| } | } | ||||
| _, err = getBenchmarkResourceSpec(resourceSpecId) | |||||
| if err != nil { | |||||
| log.Error("getBenchmarkResourceSpec failed:%v", err, ctx.Data["MsgID"]) | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| cloudBrainNewDataPrepare(ctx) | cloudBrainNewDataPrepare(ctx) | ||||
| ctx.RenderWithErr("resource spec error", tplCloudBrainBenchmarkNew, &form) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplCloudBrainBenchmarkNew, &form) | |||||
| return | return | ||||
| } | } | ||||
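The create handlers in this diff all repeat the same two-step guard: resolve the requested specification with resource.GetAndCheckSpec, then verify the user's point balance against spec.UnitPrice with account.IsPointBalanceEnough. A hedged sketch of how the pair could be bundled into one helper next to these handlers (only GetAndCheckSpec and IsPointBalanceEnough are taken from this diff; the helper itself, including the int64 spec ID, is illustrative):

    // checkSpecAndBalance is illustrative, not code from this PR. It returns the
    // resolved spec, or an error key/message the caller can pass to RenderWithErr.
    func checkSpecAndBalance(userID, specID int64, opts models.FindSpecsOptions) (*models.Specification, string) {
        spec, err := resource.GetAndCheckSpec(userID, specID, opts)
        if err != nil || spec == nil {
            return nil, "Resource specification not available"
        }
        if !account.IsPointBalanceEnough(userID, spec.UnitPrice) {
            return nil, "points.insufficient_points_balance" // translate via ctx.Tr at the call site
        }
        return spec, ""
    }

Each call site would then differ only in the FindSpecsOptions it passes and the template it re-renders on failure.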
| @@ -2402,14 +2346,8 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo | |||||
| } | } | ||||
| benchmarkPath := setting.JobPath + jobName + cloudbrain.BenchMarkMountPath | benchmarkPath := setting.JobPath + jobName + cloudbrain.BenchMarkMountPath | ||||
| var gpuType string | |||||
| for _, gpuInfo := range gpuInfos.GpuInfo { | |||||
| if gpuInfo.Queue == gpuQueue { | |||||
| gpuType = gpuInfo.Value | |||||
| } | |||||
| } | |||||
| if err := downloadRateCode(repo, jobName, childInfo.Owner, childInfo.RepoName, benchmarkPath, form.BenchmarkCategory, gpuType, ctx.User.Name); err != nil { | |||||
| if err := downloadRateCode(repo, jobName, childInfo.Owner, childInfo.RepoName, benchmarkPath, form.BenchmarkCategory, spec.AccCardType, ctx.User.Name); err != nil { | |||||
| log.Error("downloadRateCode failed, %v", err, ctx.Data["MsgID"]) | log.Error("downloadRateCode failed, %v", err, ctx.Data["MsgID"]) | ||||
| //cloudBrainNewDataPrepare(ctx) | //cloudBrainNewDataPrepare(ctx) | ||||
| //ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form) | //ctx.RenderWithErr("system error", tplCloudBrainBenchmarkNew, &form) | ||||
| @@ -2448,7 +2386,6 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo | |||||
| Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | ||||
| BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | ||||
| JobType: string(models.JobTypeBenchmark), | JobType: string(models.JobTypeBenchmark), | ||||
| GpuQueue: gpuQueue, | |||||
| Description: form.Description, | Description: form.Description, | ||||
| BranchName: cloudbrain.DefaultBranchName, | BranchName: cloudbrain.DefaultBranchName, | ||||
| BootFile: "", | BootFile: "", | ||||
| @@ -2456,8 +2393,8 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo | |||||
| CommitID: "", | CommitID: "", | ||||
| BenchmarkTypeID: benchmarkTypeID, | BenchmarkTypeID: benchmarkTypeID, | ||||
| BenchmarkChildTypeID: benchmarkChildTypeID, | BenchmarkChildTypeID: benchmarkChildTypeID, | ||||
| ResourceSpecId: resourceSpecId, | |||||
| ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = cloudbrain.GenerateTask(req) | err = cloudbrain.GenerateTask(req) | ||||
| @@ -2477,9 +2414,7 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm) | |||||
| image := form.Image | image := form.Image | ||||
| uuid := form.Attachment | uuid := form.Attachment | ||||
| jobType := form.JobType | jobType := form.JobType | ||||
| gpuQueue := form.GpuType | |||||
| codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath | ||||
| resourceSpecId := form.ResourceSpecId | |||||
| branchName := cloudbrain.DefaultBranchName | branchName := cloudbrain.DefaultBranchName | ||||
| repo := ctx.Repo.Repository | repo := ctx.Repo.Repository | ||||
| @@ -2561,6 +2496,23 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm) | |||||
| ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form) | ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form) | ||||
| return | return | ||||
| } | } | ||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeBenchmark, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne}) | |||||
| if err != nil || spec == nil { | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr("Resource specification not available", tpl, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form) | |||||
| return | |||||
| } | |||||
| log.Info("Command=" + command) | log.Info("Command=" + command) | ||||
| log.Info("ModelPath=" + storage.GetMinioPath(jobName, cloudbrain.ModelMountPath+"/")) | log.Info("ModelPath=" + storage.GetMinioPath(jobName, cloudbrain.ModelMountPath+"/")) | ||||
| req := cloudbrain.GenerateCloudBrainTaskReq{ | req := cloudbrain.GenerateCloudBrainTaskReq{ | ||||
| @@ -2578,7 +2530,6 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm) | |||||
| Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | Snn4ImageNetPath: storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | ||||
| BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | BrainScorePath: storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), | ||||
| JobType: jobType, | JobType: jobType, | ||||
| GpuQueue: gpuQueue, | |||||
| Description: form.Description, | Description: form.Description, | ||||
| BranchName: branchName, | BranchName: branchName, | ||||
| BootFile: form.BootFile, | BootFile: form.BootFile, | ||||
| @@ -2586,8 +2537,8 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm) | |||||
| CommitID: "", | CommitID: "", | ||||
| BenchmarkTypeID: 0, | BenchmarkTypeID: 0, | ||||
| BenchmarkChildTypeID: benchmarkChildTypeID, | BenchmarkChildTypeID: benchmarkChildTypeID, | ||||
| ResourceSpecId: resourceSpecId, | |||||
| ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ResultPath: storage.GetMinioPath(jobName, cloudbrain.ResultPath+"/"), | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = cloudbrain.GenerateTask(req) | err = cloudbrain.GenerateTask(req) | ||||
| @@ -2691,7 +2642,7 @@ func getInferenceJobCommand(form auth.CreateCloudBrainInferencForm) (string, err | |||||
| param += " --modelname" + "=" + form.CkptName | param += " --modelname" + "=" + form.CkptName | ||||
| command += "python /code/" + bootFile + param + " | tee " + cloudbrain.ResultPath + "/" + form.DisplayJobName + "-" + cloudbrain.LogFile | |||||
| command += "python /code/" + bootFile + param + " > " + cloudbrain.ResultPath + "/" + form.DisplayJobName + "-" + cloudbrain.LogFile | |||||
| return command, nil | return command, nil | ||||
| } | } | ||||
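getInferenceJobCommand now redirects the script's stdout into the result log with > instead of piping it through tee, so the output is written only to the log file and is no longer echoed to the container console as well. With placeholder values (boot file inference.py, display job name test1, model file model.ckpt; <ResultPath> and <LogFile> stand in for the cloudbrain constants), the assembled command looks roughly like:

    python /code/inference.py --modelname=model.ckpt > <ResultPath>/test1-<LogFile>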
| @@ -2765,6 +2716,8 @@ func GetCloudbrainAiCenter(task models.Cloudbrain, ctx *context.Context) string | |||||
| return ctx.Tr("repo.cloudbrain1") | return ctx.Tr("repo.cloudbrain1") | ||||
| } else if task.Type == models.TypeCloudBrainTwo { | } else if task.Type == models.TypeCloudBrainTwo { | ||||
| return ctx.Tr("repo.cloudbrain2") | return ctx.Tr("repo.cloudbrain2") | ||||
| } else if task.Type == models.TypeCDCenter { | |||||
| return ctx.Tr("repo.cdCenter") | |||||
| } else if task.Type == models.TypeC2Net { | } else if task.Type == models.TypeC2Net { | ||||
| return getCutStringAiCenterByAiCenter(task.AiCenter) | return getCutStringAiCenterByAiCenter(task.AiCenter) | ||||
| } | } | ||||
| @@ -2779,7 +2732,7 @@ func getCutStringAiCenterByAiCenter(aiCenter string) string { | |||||
| } | } | ||||
| func GetCloudbrainCluster(task models.Cloudbrain, ctx *context.Context) string { | func GetCloudbrainCluster(task models.Cloudbrain, ctx *context.Context) string { | ||||
| if task.Type == models.TypeCloudBrainOne || task.Type == models.TypeCloudBrainTwo { | |||||
| if task.Type == models.TypeCloudBrainOne || task.Type == models.TypeCloudBrainTwo || task.Type == models.TypeCDCenter { | |||||
| return ctx.Tr("cloudbrain.resource_cluster_openi") | return ctx.Tr("cloudbrain.resource_cluster_openi") | ||||
| } else if task.Type == models.TypeC2Net { | } else if task.Type == models.TypeC2Net { | ||||
| return ctx.Tr("cloudbrain.resource_cluster_c2net") | return ctx.Tr("cloudbrain.resource_cluster_c2net") | ||||
| @@ -2866,10 +2819,10 @@ func GetCloudbrainFlavorName(task models.Cloudbrain) (string, error) { | |||||
| return CloudbrainOneFlavorName, nil | return CloudbrainOneFlavorName, nil | ||||
| } | } | ||||
| } | } | ||||
| } else if (task.Type == models.TypeCloudBrainTwo || task.Type == models.TypeC2Net) && task.FlavorName != "" { | |||||
| } else if (task.Type == models.TypeCloudBrainTwo || task.Type == models.TypeC2Net || task.Type == models.TypeCDCenter) && task.FlavorName != "" { | |||||
| replaceFlavorName := strings.ReplaceAll(task.FlavorName, ":", ":") | replaceFlavorName := strings.ReplaceAll(task.FlavorName, ":", ":") | ||||
| return replaceFlavorName, nil | return replaceFlavorName, nil | ||||
| } else if task.Type == models.TypeCloudBrainTwo && task.FlavorName == "" && task.FlavorCode != "" { | |||||
| } else if (task.Type == models.TypeCloudBrainTwo || task.Type == models.TypeCDCenter) && task.FlavorName == "" && task.FlavorCode != "" { | |||||
| cloudbrainTwoFlavorName := getFlavorNameByFlavorCode(task.FlavorCode) | cloudbrainTwoFlavorName := getFlavorNameByFlavorCode(task.FlavorCode) | ||||
| return cloudbrainTwoFlavorName, nil | return cloudbrainTwoFlavorName, nil | ||||
| } else if task.Type == models.TypeCloudBrainTwo && task.JobType == string(models.JobTypeDebug) && task.FlavorName == "" && task.FlavorCode == "" { | } else if task.Type == models.TypeCloudBrainTwo && task.JobType == string(models.JobTypeDebug) && task.FlavorName == "" && task.FlavorCode == "" { | ||||
| @@ -1,6 +1,8 @@ | |||||
| package repo | package repo | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/services/cloudbrain/resource" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -106,15 +108,11 @@ func grampusTrainJobNewDataPrepare(ctx *context.Context, processType string) err | |||||
| } | } | ||||
| } | } | ||||
| //get valid resource specs | |||||
| specs, err := grampus.GetResourceSpecs(processType) | |||||
| grampusSpecs := getFilterSpecBySpecialPool(specs, includeCenters, excludeCenters) | |||||
| if err != nil { | |||||
| log.Error("GetResourceSpecs failed:", err.Error()) | |||||
| } else { | |||||
| ctx.Data["flavor_infos"] = grampusSpecs | |||||
| //prepare available specs | |||||
| if processType == grampus.ProcessorTypeNPU { | |||||
| prepareGrampusTrainSpecs(ctx, models.NPU) | |||||
| } else if processType == grampus.ProcessorTypeGPU { | |||||
| prepareGrampusTrainSpecs(ctx, models.GPU) | |||||
| } | } | ||||
| //get branches | //get branches | ||||
| @@ -140,6 +138,15 @@ func grampusTrainJobNewDataPrepare(ctx *context.Context, processType string) err | |||||
| return nil | return nil | ||||
| } | } | ||||
| func prepareGrampusTrainSpecs(ctx *context.Context, computeResource string) { | |||||
| noteBookSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: computeResource, | |||||
| Cluster: models.C2NetCluster, | |||||
| }) | |||||
| ctx.Data["Specs"] = noteBookSpecs | |||||
| } | |||||
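grampusTrainJobNewDataPrepare now derives the available specs from the processor type instead of calling grampus.GetResourceSpecs: NPU maps to models.NPU and GPU to models.GPU before querying FindAvailableSpecs for the C2Net cluster. The mapping is inlined above; factored out it would look roughly like this (illustrative only, not code from this PR):

    // computeResourceForProcessor mirrors the branches in
    // grampusTrainJobNewDataPrepare; both constant sets appear in this diff.
    func computeResourceForProcessor(processType string) string {
        switch processType {
        case grampus.ProcessorTypeNPU:
            return models.NPU
        case grampus.ProcessorTypeGPU:
            return models.GPU
        default:
            return ""
        }
    }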
| func getFilterSpecBySpecialPool(specs *models.GetGrampusResourceSpecsResult, includeCenters map[string]struct{}, excludeCenters map[string]struct{}) []models.GrampusSpec { | func getFilterSpecBySpecialPool(specs *models.GetGrampusResourceSpecsResult, includeCenters map[string]struct{}, excludeCenters map[string]struct{}) []models.GrampusSpec { | ||||
| if len(includeCenters) == 0 && len(excludeCenters) == 0 { | if len(includeCenters) == 0 && len(excludeCenters) == 0 { | ||||
| return specs.Infos | return specs.Infos | ||||
| @@ -206,7 +213,6 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| codeMinioPath := setting.CBCodePathPrefix + jobName + cloudbrain.CodeMountPath + "/" | codeMinioPath := setting.CBCodePathPrefix + jobName + cloudbrain.CodeMountPath + "/" | ||||
| dataMinioPath := setting.Attachment.Minio.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid | dataMinioPath := setting.Attachment.Minio.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid | ||||
| branchName := form.BranchName | branchName := form.BranchName | ||||
| flavorName := form.FlavorName | |||||
| image := strings.TrimSpace(form.Image) | image := strings.TrimSpace(form.Image) | ||||
| if !jobNamePattern.MatchString(displayJobName) { | if !jobNamePattern.MatchString(displayJobName) { | ||||
| @@ -272,6 +278,25 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| } | } | ||||
| } | } | ||||
| //check specification | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: models.GPU, | |||||
| Cluster: models.C2NetCluster, | |||||
| }) | |||||
| if err != nil || spec == nil { | |||||
| grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU) | |||||
| ctx.RenderWithErr("Resource specification not available", tplGrampusTrainJobGPUNew, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplGrampusTrainJobGPUNew, &form) | |||||
| return | |||||
| } | |||||
| //check dataset | //check dataset | ||||
| attachment, err := models.GetAttachmentByUUID(uuid) | attachment, err := models.GetAttachmentByUUID(uuid) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -336,7 +361,6 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| ComputeResource: models.GPUResource, | ComputeResource: models.GPUResource, | ||||
| ProcessType: grampus.ProcessorTypeGPU, | ProcessType: grampus.ProcessorTypeGPU, | ||||
| Command: command, | Command: command, | ||||
| ResourceSpecId: form.FlavorID, | |||||
| ImageUrl: image, | ImageUrl: image, | ||||
| Description: description, | Description: description, | ||||
| BootFile: bootFile, | BootFile: bootFile, | ||||
| @@ -344,12 +368,12 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| CommitID: commitID, | CommitID: commitID, | ||||
| BranchName: branchName, | BranchName: branchName, | ||||
| Params: form.Params, | Params: form.Params, | ||||
| FlavorName: flavorName, | |||||
| EngineName: image, | EngineName: image, | ||||
| DatasetName: attachment.Name, | DatasetName: attachment.Name, | ||||
| IsLatestVersion: modelarts.IsLatestVersion, | IsLatestVersion: modelarts.IsLatestVersion, | ||||
| VersionCount: modelarts.VersionCountOne, | VersionCount: modelarts.VersionCountOne, | ||||
| WorkServerNumber: 1, | WorkServerNumber: 1, | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = grampus.GenerateTrainJob(ctx, req) | err = grampus.GenerateTrainJob(ctx, req) | ||||
| @@ -397,7 +421,6 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| dataObsPath := setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/" | dataObsPath := setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/" | ||||
| branchName := form.BranchName | branchName := form.BranchName | ||||
| isLatestVersion := modelarts.IsLatestVersion | isLatestVersion := modelarts.IsLatestVersion | ||||
| flavorName := form.FlavorName | |||||
| versionCount := modelarts.VersionCountOne | versionCount := modelarts.VersionCountOne | ||||
| engineName := form.EngineName | engineName := form.EngineName | ||||
| @@ -464,6 +487,24 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| } | } | ||||
| } | } | ||||
| //check specification | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.C2NetCluster, | |||||
| }) | |||||
| if err != nil || spec == nil { | |||||
| grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU) | |||||
| ctx.RenderWithErr("Resource specification not available", tplGrampusTrainJobNPUNew, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplGrampusTrainJobNPUNew, &form) | |||||
| return | |||||
| } | |||||
| //check dataset | //check dataset | ||||
| attachment, err := models.GetAttachmentByUUID(uuid) | attachment, err := models.GetAttachmentByUUID(uuid) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -518,7 +559,6 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| ComputeResource: models.NPUResource, | ComputeResource: models.NPUResource, | ||||
| ProcessType: grampus.ProcessorTypeNPU, | ProcessType: grampus.ProcessorTypeNPU, | ||||
| Command: command, | Command: command, | ||||
| ResourceSpecId: form.FlavorID, | |||||
| ImageId: form.ImageID, | ImageId: form.ImageID, | ||||
| DataUrl: dataObsPath, | DataUrl: dataObsPath, | ||||
| Description: description, | Description: description, | ||||
| @@ -531,11 +571,11 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain | |||||
| IsLatestVersion: isLatestVersion, | IsLatestVersion: isLatestVersion, | ||||
| BranchName: branchName, | BranchName: branchName, | ||||
| Params: form.Params, | Params: form.Params, | ||||
| FlavorName: flavorName, | |||||
| EngineName: engineName, | EngineName: engineName, | ||||
| VersionCount: versionCount, | VersionCount: versionCount, | ||||
| TotalVersionCount: modelarts.TotalVersionCount, | TotalVersionCount: modelarts.TotalVersionCount, | ||||
| DatasetName: attachment.Name, | DatasetName: attachment.Name, | ||||
| Spec: spec, | |||||
| } | } | ||||
| err = grampus.GenerateTrainJob(ctx, req) | err = grampus.GenerateTrainJob(ctx, req) | ||||
| @@ -712,6 +752,7 @@ func GrampusTrainJobShow(ctx *context.Context) { | |||||
| taskList := make([]*models.Cloudbrain, 0) | taskList := make([]*models.Cloudbrain, 0) | ||||
| taskList = append(taskList, task) | taskList = append(taskList, task) | ||||
| prepareSpec4Show(ctx, task) | |||||
| ctx.Data["version_list_task"] = taskList | ctx.Data["version_list_task"] = taskList | ||||
| ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false) | ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false) | ||||
| ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) | ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) | ||||
| @@ -2,6 +2,9 @@ package repo | |||||
| import ( | import ( | ||||
| "archive/zip" | "archive/zip" | ||||
| "code.gitea.io/gitea/modules/modelarts_cd" | |||||
| "code.gitea.io/gitea/services/cloudbrain/resource" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -60,18 +63,11 @@ func DebugJobIndex(ctx *context.Context) { | |||||
| if page <= 0 { | if page <= 0 { | ||||
| page = 1 | page = 1 | ||||
| } | } | ||||
| typeCloudBrain := models.TypeCloudBrainAll | |||||
| jobTypeNot := false | jobTypeNot := false | ||||
| if listType == models.GPUResource { | |||||
| typeCloudBrain = models.TypeCloudBrainOne | |||||
| } else if listType == models.NPUResource { | |||||
| typeCloudBrain = models.TypeCloudBrainTwo | |||||
| } else if listType == models.AllResource { | |||||
| typeCloudBrain = models.TypeCloudBrainAll | |||||
| } else { | |||||
| log.Error("listType(%s) error", listType) | |||||
| ctx.ServerError("listType error", errors.New("listType error")) | |||||
| return | |||||
| var computeResource string | |||||
| if listType != models.AllResource { | |||||
| computeResource = listType | |||||
| } | } | ||||
| var jobTypes []string | var jobTypes []string | ||||
| @@ -81,10 +77,11 @@ func DebugJobIndex(ctx *context.Context) { | |||||
| Page: page, | Page: page, | ||||
| PageSize: setting.UI.IssuePagingNum, | PageSize: setting.UI.IssuePagingNum, | ||||
| }, | }, | ||||
| RepoID: repo.ID, | |||||
| Type: typeCloudBrain, | |||||
| JobTypeNot: jobTypeNot, | |||||
| JobTypes: jobTypes, | |||||
| RepoID: repo.ID, | |||||
| ComputeResource: computeResource, | |||||
| Type: models.TypeCloudBrainAll, | |||||
| JobTypeNot: jobTypeNot, | |||||
| JobTypes: jobTypes, | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| ctx.ServerError("Get debugjob faild:", err) | ctx.ServerError("Get debugjob faild:", err) | ||||
| @@ -134,17 +131,9 @@ func notebookNewDataPrepare(ctx *context.Context) error { | |||||
| return err | return err | ||||
| } | } | ||||
| ctx.Data["attachments"] = attachs | ctx.Data["attachments"] = attachs | ||||
| ctx.Data["images"] = setting.StImageInfos.ImageInfo | |||||
| if modelarts.ImageInfos == nil { | |||||
| json.Unmarshal([]byte(setting.ImageInfos), &modelarts.ImageInfos) | |||||
| } | |||||
| ctx.Data["images"] = modelarts.ImageInfos.ImageInfo | |||||
| if modelarts.FlavorInfos == nil { | |||||
| json.Unmarshal([]byte(setting.FlavorInfos), &modelarts.FlavorInfos) | |||||
| } | |||||
| ctx.Data["flavors"] = modelarts.FlavorInfos.FlavorInfo | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeDebug)) | |||||
| prepareCloudbrainTwoDebugSpecs(ctx) | |||||
| ctx.Data["datasetType"] = models.TypeCloudBrainTwo | ctx.Data["datasetType"] = models.TypeCloudBrainTwo | ||||
| @@ -154,6 +143,20 @@ func notebookNewDataPrepare(ctx *context.Context) error { | |||||
| return nil | return nil | ||||
| } | } | ||||
| func prepareCloudbrainTwoDebugSpecs(ctx *context.Context) { | |||||
| aiCenterCode := models.AICenterOfCloudBrainTwo | |||||
| if setting.ModelartsCD.Enabled { | |||||
| aiCenterCode = models.AICenterOfChengdu | |||||
| } | |||||
| noteBookSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeDebug, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: aiCenterCode, | |||||
| }) | |||||
| ctx.Data["Specs"] = noteBookSpecs | |||||
| } | |||||
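prepareCloudbrainTwoDebugSpecs picks the AI-center code the same way Notebook2Create and NotebookRestart do later in this diff: Chengdu when setting.ModelartsCD.Enabled is set, otherwise CloudBrainTwo. A small illustrative helper for that repeated check (not part of this PR; it assumes the AI-center constants are plain strings, as their use in FindSpecsOptions suggests):

    // debugNotebookAiCenter centralizes the ModelartsCD switch that the
    // notebook handlers currently repeat inline.
    func debugNotebookAiCenter() string {
        if setting.ModelartsCD.Enabled {
            return models.AICenterOfChengdu
        }
        return models.AICenterOfCloudBrainTwo
    }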
| func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) { | func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) { | ||||
| ctx.Data["PageIsNotebook"] = true | ctx.Data["PageIsNotebook"] = true | ||||
| jobName := form.JobName | jobName := form.JobName | ||||
| @@ -204,7 +207,6 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm | |||||
| jobName := util.ConvertDisplayJobNameToJobName(displayJobName) | jobName := util.ConvertDisplayJobNameToJobName(displayJobName) | ||||
| uuid := form.Attachment | uuid := form.Attachment | ||||
| description := form.Description | description := form.Description | ||||
| flavor := form.Flavor | |||||
| imageId := form.ImageId | imageId := form.ImageId | ||||
| repo := ctx.Repo.Repository | repo := ctx.Repo.Repository | ||||
| @@ -239,15 +241,33 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm | |||||
| return | return | ||||
| } | } | ||||
| } | } | ||||
| errStr := checkModelArtsSpecialPool(ctx, flavor, string(models.JobTypeDebug)) | |||||
| if errStr != "" { | |||||
| var aiCenterCode = models.AICenterOfCloudBrainTwo | |||||
| if setting.ModelartsCD.Enabled { | |||||
| aiCenterCode = models.AICenterOfChengdu | |||||
| } | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeDebug, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: aiCenterCode}) | |||||
| if err != nil || spec == nil { | |||||
| notebookNewDataPrepare(ctx) | notebookNewDataPrepare(ctx) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsNotebookNew, &form) | |||||
| ctx.RenderWithErr("Resource specification not available", tplModelArtsNotebookNew, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d ", ctx.User.ID, spec.ID) | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsNotebookNew, &form) | |||||
| return | return | ||||
| } | } | ||||
| err = modelarts.GenerateNotebook2(ctx, displayJobName, jobName, uuid, description, flavor, imageId) | |||||
| if setting.ModelartsCD.Enabled { | |||||
| err = modelarts_cd.GenerateNotebook(ctx, displayJobName, jobName, uuid, description, imageId, spec) | |||||
| } else { | |||||
| err = modelarts.GenerateNotebook2(ctx, displayJobName, jobName, uuid, description, imageId, spec) | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("GenerateNotebook2 failed, %v", err, ctx.Data["MsgID"]) | log.Error("GenerateNotebook2 failed, %v", err, ctx.Data["MsgID"]) | ||||
| notebookNewDataPrepare(ctx) | notebookNewDataPrepare(ctx) | ||||
| @@ -292,24 +312,7 @@ func NotebookShow(ctx *context.Context) { | |||||
| if err == nil { | if err == nil { | ||||
| task.User = user | task.User = user | ||||
| } | } | ||||
| if modelarts.FlavorInfos == nil { | |||||
| json.Unmarshal([]byte(setting.FlavorInfos), &modelarts.FlavorInfos) | |||||
| } | |||||
| findSpec := false | |||||
| if modelarts.FlavorInfos != nil { | |||||
| ctx.Data["resource_spec"] = modelarts.FlavorInfos.FlavorInfo[0].Desc | |||||
| for _, f := range modelarts.FlavorInfos.FlavorInfo { | |||||
| if fmt.Sprint(f.Value) == task.FlavorCode { | |||||
| ctx.Data["resource_spec"] = f.Desc | |||||
| findSpec = true | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| setShowSpecBySpecialPoolConfig(ctx, findSpec, task) | |||||
| prepareSpec4Show(ctx, task) | |||||
| if task.TrainJobDuration == "" { | if task.TrainJobDuration == "" { | ||||
| if task.Duration == 0 { | if task.Duration == 0 { | ||||
| var duration int64 | var duration int64 | ||||
| @@ -394,36 +397,16 @@ func setShowSpecBySpecialPoolConfig(ctx *context.Context, findSpec bool, task *m | |||||
| } | } | ||||
| } | } | ||||
| func NotebookDebug(ctx *context.Context) { | |||||
| var jobID = ctx.Params(":jobid") | |||||
| result, err := modelarts.GetJob(jobID) | |||||
| if err != nil { | |||||
| ctx.RenderWithErr(err.Error(), tplModelArtsNotebookIndex, nil) | |||||
| return | |||||
| } | |||||
| res, err := modelarts.GetJobToken(jobID) | |||||
| if err != nil { | |||||
| ctx.RenderWithErr(err.Error(), tplModelArtsNotebookIndex, nil) | |||||
| return | |||||
| } | |||||
| urls := strings.Split(result.Spec.Annotations.Url, "/") | |||||
| urlPrefix := result.Spec.Annotations.TargetDomain | |||||
| for i, url := range urls { | |||||
| if i > 2 { | |||||
| urlPrefix += "/" + url | |||||
| } | |||||
| } | |||||
| debugUrl := urlPrefix + "?token=" + res.Token | |||||
| ctx.Redirect(debugUrl) | |||||
| } | |||||
| func NotebookDebug2(ctx *context.Context) { | func NotebookDebug2(ctx *context.Context) { | ||||
| var err error | |||||
| var result *models.GetNotebook2Result | |||||
| task := ctx.Cloudbrain | task := ctx.Cloudbrain | ||||
| result, err := modelarts.GetNotebook2(task.JobID) | |||||
| if task.Type == models.TypeCloudBrainTwo { | |||||
| result, err = modelarts.GetNotebook2(task.JobID) | |||||
| } else if task.Type == models.TypeCDCenter { | |||||
| result, err = modelarts_cd.GetNotebook(task.JobID) | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| ctx.RenderWithErr(err.Error(), tplModelArtsNotebookIndex, nil) | ctx.RenderWithErr(err.Error(), tplModelArtsNotebookIndex, nil) | ||||
| return | return | ||||
| @@ -437,6 +420,7 @@ func NotebookRestart(ctx *context.Context) { | |||||
| var resultCode = "-1" | var resultCode = "-1" | ||||
| var errorMsg = "" | var errorMsg = "" | ||||
| var status = "" | var status = "" | ||||
| var spec *models.Specification | |||||
| task := ctx.Cloudbrain | task := ctx.Cloudbrain | ||||
| @@ -464,12 +448,44 @@ func NotebookRestart(ctx *context.Context) { | |||||
| } | } | ||||
| } | } | ||||
| oldSpec, err := resource.GetCloudbrainSpec(task.ID) | |||||
| if err != nil || oldSpec == nil { | |||||
| log.Error("NotebookManage GetCloudbrainSpec error.%v", err) | |||||
| errorMsg = "Resource specification not available" | |||||
| break | |||||
| } | |||||
| aiCenterCode := models.AICenterOfCloudBrainTwo | |||||
| if task.Type == models.TypeCDCenter { | |||||
| aiCenterCode = models.AICenterOfChengdu | |||||
| } | |||||
| spec, err = resource.GetAndCheckSpec(ctx.User.ID, oldSpec.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobType(task.JobType), | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: aiCenterCode}) | |||||
| if err != nil || spec == nil { | |||||
| log.Error("NotebookManage GetAndCheckSpec error.task.id = %d", task.ID) | |||||
| errorMsg = "Resource specification not support any more" | |||||
| break | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| errorMsg = ctx.Tr("points.insufficient_points_balance") | |||||
| break | |||||
| } | |||||
| createTime := timeutil.TimeStampNow() | createTime := timeutil.TimeStampNow() | ||||
| param := models.NotebookAction{ | param := models.NotebookAction{ | ||||
| Action: models.ActionStart, | Action: models.ActionStart, | ||||
| } | } | ||||
| res, err := modelarts.ManageNotebook2(task.JobID, param) | |||||
| var res *models.NotebookActionResult | |||||
| if task.Type == models.TypeCloudBrainTwo { | |||||
| res, err = modelarts.ManageNotebook2(task.JobID, param) | |||||
| } else if task.Type == models.TypeCDCenter { | |||||
| res, err = modelarts_cd.ManageNotebook(task.JobID, param) | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("ManageNotebook2(%s) failed:%v", task.DisplayJobName, err.Error(), ctx.Data["MsgID"]) | log.Error("ManageNotebook2(%s) failed:%v", task.DisplayJobName, err.Error(), ctx.Data["MsgID"]) | ||||
| /* The 502-on-redebug scenario is not handled for now; see the design doc for details | /* The 502-on-redebug scenario is not handled for now; see the design doc for details | ||||
| @@ -507,8 +523,7 @@ func NotebookRestart(ctx *context.Context) { | |||||
| Description: task.Description, | Description: task.Description, | ||||
| CreatedUnix: createTime, | CreatedUnix: createTime, | ||||
| UpdatedUnix: createTime, | UpdatedUnix: createTime, | ||||
| FlavorCode: task.FlavorCode, | |||||
| FlavorName: task.FlavorName, | |||||
| Spec: spec, | |||||
| } | } | ||||
| err = models.RestartCloudbrain(task, newTask) | err = models.RestartCloudbrain(task, newTask) | ||||
| @@ -555,7 +570,14 @@ func NotebookStop(ctx *context.Context) { | |||||
| Action: models.ActionStop, | Action: models.ActionStop, | ||||
| } | } | ||||
| res, err := modelarts.ManageNotebook2(task.JobID, param) | |||||
| var err error | |||||
| var res *models.NotebookActionResult | |||||
| if task.Type == models.TypeCloudBrainTwo { | |||||
| res, err = modelarts.ManageNotebook2(task.JobID, param) | |||||
| } else if task.Type == models.TypeCDCenter { | |||||
| res, err = modelarts_cd.ManageNotebook(task.JobID, param) | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("ManageNotebook2(%s) failed:%v", task.JobName, err.Error(), ctx.Data["MsgID"]) | log.Error("ManageNotebook2(%s) failed:%v", task.JobName, err.Error(), ctx.Data["MsgID"]) | ||||
| resultCode = "-1" | resultCode = "-1" | ||||
| @@ -605,7 +627,13 @@ func NotebookDel(ctx *context.Context) { | |||||
| return | return | ||||
| } | } | ||||
| _, err := modelarts.DelNotebook2(task.JobID) | |||||
| var err error | |||||
| if task.Type == models.TypeCloudBrainTwo { | |||||
| _, err = modelarts.DelNotebook2(task.JobID) | |||||
| } else if task.Type == models.TypeCDCenter { | |||||
| _, err = modelarts_cd.DelNotebook(task.JobID) | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("DelNotebook2(%s) failed:%v", task.JobName, err.Error()) | log.Error("DelNotebook2(%s) failed:%v", task.JobName, err.Error()) | ||||
| if strings.Contains(err.Error(), modelarts.NotebookNotFound) || strings.Contains(err.Error(), modelarts.NotebookNoPermission) || strings.Contains(err.Error(), modelarts.NotebookInvalid) { | if strings.Contains(err.Error(), modelarts.NotebookNotFound) || strings.Contains(err.Error(), modelarts.NotebookNoPermission) || strings.Contains(err.Error(), modelarts.NotebookInvalid) { | ||||
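NotebookDebug2, NotebookRestart, NotebookStop and NotebookDel above all branch on task.Type to choose between the modelarts and modelarts_cd clients. If more clusters are added, that switch could be hidden behind a small interface; a hedged sketch (the interface and wrapper types are illustrative, only the wrapped functions come from this diff):

    // notebookBackend is an illustrative abstraction, not code from this PR.
    type notebookBackend interface {
        Manage(jobID string, action models.NotebookAction) (*models.NotebookActionResult, error)
        Delete(jobID string) error
    }

    type cb2Backend struct{}

    func (cb2Backend) Manage(jobID string, action models.NotebookAction) (*models.NotebookActionResult, error) {
        return modelarts.ManageNotebook2(jobID, action)
    }

    func (cb2Backend) Delete(jobID string) error {
        _, err := modelarts.DelNotebook2(jobID)
        return err
    }

    type cdBackend struct{}

    func (cdBackend) Manage(jobID string, action models.NotebookAction) (*models.NotebookActionResult, error) {
        return modelarts_cd.ManageNotebook(jobID, action)
    }

    func (cdBackend) Delete(jobID string) error {
        _, err := modelarts_cd.DelNotebook(jobID)
        return err
    }

    // backendFor mirrors the if/else chains in the handlers above.
    func backendFor(task *models.Cloudbrain) notebookBackend {
        if task.Type == models.TypeCDCenter {
            return cdBackend{}
        }
        return cb2Backend{}
    }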
| @@ -741,14 +769,7 @@ func trainJobNewDataPrepare(ctx *context.Context) error { | |||||
| } | } | ||||
| ctx.Data["engine_versions"] = versionInfos.Version | ctx.Data["engine_versions"] = versionInfos.Version | ||||
| var flavorInfos modelarts.Flavor | |||||
| if err = json.Unmarshal([]byte(setting.TrainJobFLAVORINFOS), &flavorInfos); err != nil { | |||||
| ctx.ServerError("json.Unmarshal failed:", err) | |||||
| return err | |||||
| } | |||||
| ctx.Data["flavor_infos"] = flavorInfos.Info | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeTrain)) | |||||
| prepareCloudbrainTwoTrainSpecs(ctx) | |||||
| ctx.Data["params"] = "" | ctx.Data["params"] = "" | ||||
| ctx.Data["branchName"] = ctx.Repo.BranchName | ctx.Data["branchName"] = ctx.Repo.BranchName | ||||
| @@ -764,10 +785,20 @@ func trainJobNewDataPrepare(ctx *context.Context) error { | |||||
| ctx.Data["WaitCount"] = waitCount | ctx.Data["WaitCount"] = waitCount | ||||
| setMultiNodeIfConfigureMatch(ctx) | setMultiNodeIfConfigureMatch(ctx) | ||||
| return nil | return nil | ||||
| } | } | ||||
| func prepareCloudbrainTwoTrainSpecs(ctx *context.Context) { | |||||
| noteBookSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainTwo, | |||||
| }) | |||||
| ctx.Data["Specs"] = noteBookSpecs | |||||
| } | |||||
| func setMultiNodeIfConfigureMatch(ctx *context.Context) { | func setMultiNodeIfConfigureMatch(ctx *context.Context) { | ||||
| modelarts.InitMultiNode() | modelarts.InitMultiNode() | ||||
| if modelarts.MultiNodeConfig != nil { | if modelarts.MultiNodeConfig != nil { | ||||
| @@ -862,13 +893,7 @@ func trainJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArts | |||||
| } | } | ||||
| ctx.Data["engine_versions"] = versionInfos.Version | ctx.Data["engine_versions"] = versionInfos.Version | ||||
| var flavorInfos modelarts.Flavor | |||||
| if err = json.Unmarshal([]byte(setting.TrainJobFLAVORINFOS), &flavorInfos); err != nil { | |||||
| ctx.ServerError("json.Unmarshal failed:", err) | |||||
| return err | |||||
| } | |||||
| ctx.Data["flavor_infos"] = flavorInfos.Info | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeTrain)) | |||||
| prepareCloudbrainTwoTrainSpecs(ctx) | |||||
| configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom) | configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -957,14 +982,12 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error { | |||||
| } | } | ||||
| ctx.Data["engine_versions"] = versionInfos.Version | ctx.Data["engine_versions"] = versionInfos.Version | ||||
| var flavorInfos modelarts.Flavor | |||||
| if err = json.Unmarshal([]byte(setting.TrainJobFLAVORINFOS), &flavorInfos); err != nil { | |||||
| ctx.ServerError("json.Unmarshal failed:", err) | |||||
| return err | |||||
| prepareCloudbrainTwoTrainSpecs(ctx) | |||||
| spec, _ := resource.GetCloudbrainSpec(task.ID) | |||||
| if spec != nil { | |||||
| log.Info("spec_id = %d", spec.ID) | |||||
| ctx.Data["spec_id"] = spec.ID | |||||
| } | } | ||||
| ctx.Data["flavor_infos"] = flavorInfos.Info | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeTrain)) | |||||
| var Parameters modelarts.Parameters | var Parameters modelarts.Parameters | ||||
| if err = json.Unmarshal([]byte(task.Parameters), &Parameters); err != nil { | if err = json.Unmarshal([]byte(task.Parameters), &Parameters); err != nil { | ||||
| @@ -1055,13 +1078,7 @@ func versionErrorDataPrepare(ctx *context.Context, form auth.CreateModelArtsTrai | |||||
| } | } | ||||
| ctx.Data["engine_versions"] = versionInfos.Version | ctx.Data["engine_versions"] = versionInfos.Version | ||||
| var flavorInfos modelarts.Flavor | |||||
| if err = json.Unmarshal([]byte(setting.TrainJobFLAVORINFOS), &flavorInfos); err != nil { | |||||
| ctx.ServerError("json.Unmarshal failed:", err) | |||||
| return err | |||||
| } | |||||
| ctx.Data["flavor_infos"] = flavorInfos.Info | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeTrain)) | |||||
| prepareCloudbrainTwoTrainSpecs(ctx) | |||||
| var Parameters modelarts.Parameters | var Parameters modelarts.Parameters | ||||
| if err = json.Unmarshal([]byte(form.Params), &Parameters); err != nil { | if err = json.Unmarshal([]byte(form.Params), &Parameters); err != nil { | ||||
| @@ -1114,7 +1131,6 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) | |||||
| workServerNumber := form.WorkServerNumber | workServerNumber := form.WorkServerNumber | ||||
| engineID := form.EngineID | engineID := form.EngineID | ||||
| bootFile := strings.TrimSpace(form.BootFile) | bootFile := strings.TrimSpace(form.BootFile) | ||||
| flavorCode := form.Flavor | |||||
| params := form.Params | params := form.Params | ||||
| poolID := form.PoolID | poolID := form.PoolID | ||||
| //isSaveParam := form.IsSaveParam | //isSaveParam := form.IsSaveParam | ||||
| @@ -1130,8 +1146,8 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) | |||||
| VersionCount := modelarts.VersionCountOne | VersionCount := modelarts.VersionCountOne | ||||
| EngineName := form.EngineName | EngineName := form.EngineName | ||||
| errStr:=checkMultiNode(ctx.User.ID,form.WorkServerNumber) | |||||
| if errStr!=""{ | |||||
| errStr := checkMultiNode(ctx.User.ID, form.WorkServerNumber) | |||||
| if errStr != "" { | |||||
| trainJobErrorNewDataPrepare(ctx, form) | trainJobErrorNewDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobNew, &form) | ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobNew, &form) | ||||
| return | return | ||||
| @@ -1167,12 +1183,23 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) | |||||
| return | return | ||||
| } | } | ||||
| errStr = checkModelArtsSpecialPool(ctx, flavorCode, string(models.JobTypeTrain)) | |||||
| if errStr != "" { | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainTwo}) | |||||
| if err != nil || spec == nil { | |||||
| trainJobErrorNewDataPrepare(ctx, form) | trainJobErrorNewDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobNew, &form) | |||||
| ctx.RenderWithErr("Resource specification not available", tplModelArtsTrainJobNew, &form) | |||||
| return | return | ||||
| } | } | ||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| trainJobErrorNewDataPrepare(ctx, form) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsTrainJobNew, &form) | |||||
| return | |||||
| } | |||||
| //Determine whether the task name of the task in the project is duplicated | //Determine whether the task name of the task in the project is duplicated | ||||
| tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, string(models.JobTypeTrain), displayJobName) | tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, string(models.JobTypeTrain), displayJobName) | ||||
| if err == nil { | if err == nil { | ||||
| @@ -1333,7 +1360,6 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) | |||||
| BootFileUrl: codeObsPath + bootFile, | BootFileUrl: codeObsPath + bootFile, | ||||
| BootFile: bootFile, | BootFile: bootFile, | ||||
| TrainUrl: outputObsPath, | TrainUrl: outputObsPath, | ||||
| FlavorCode: flavorCode, | |||||
| WorkServerNumber: workServerNumber, | WorkServerNumber: workServerNumber, | ||||
| EngineID: int64(engineID), | EngineID: int64(engineID), | ||||
| LogUrl: logObsPath, | LogUrl: logObsPath, | ||||
| @@ -1349,6 +1375,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) | |||||
| VersionCount: VersionCount, | VersionCount: VersionCount, | ||||
| TotalVersionCount: modelarts.TotalVersionCount, | TotalVersionCount: modelarts.TotalVersionCount, | ||||
| DatasetName: datasetNames, | DatasetName: datasetNames, | ||||
| Spec: spec, | |||||
| } | } | ||||
| userCommand, userImageUrl := getUserCommand(engineID, req) | userCommand, userImageUrl := getUserCommand(engineID, req) | ||||
| req.UserCommand = userCommand | req.UserCommand = userCommand | ||||
| @@ -1371,31 +1398,31 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) | |||||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job") | ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job") | ||||
| } | } | ||||
| func checkMultiNode(userId int64, serverNum int) string{ | |||||
| if serverNum==1{ | |||||
| func checkMultiNode(userId int64, serverNum int) string { | |||||
| if serverNum == 1 { | |||||
| return "" | return "" | ||||
| } | } | ||||
| modelarts.InitMultiNode() | modelarts.InitMultiNode() | ||||
| var isServerNumValid=false | |||||
| var isServerNumValid = false | |||||
| if modelarts.MultiNodeConfig != nil { | if modelarts.MultiNodeConfig != nil { | ||||
| for _, info := range modelarts.MultiNodeConfig.Info { | for _, info := range modelarts.MultiNodeConfig.Info { | ||||
| if isInOrg, _ := models.IsOrganizationMemberByOrgName(info.Org, userId); isInOrg { | if isInOrg, _ := models.IsOrganizationMemberByOrgName(info.Org, userId); isInOrg { | ||||
| if isInNodes(info.Node,serverNum){ | |||||
| isServerNumValid=true | |||||
| if isInNodes(info.Node, serverNum) { | |||||
| isServerNumValid = true | |||||
| break | break | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| if isServerNumValid{ | |||||
| if isServerNumValid { | |||||
| return "" | return "" | ||||
| }else{ | |||||
| } else { | |||||
| return "repo.modelarts.no_node_right" | return "repo.modelarts.no_node_right" | ||||
| } | } | ||||
| } | } | ||||
| func checkInferenceJobMultiNode(userId int64, serverNum int) string{ | |||||
| if serverNum==1{ | |||||
| func checkInferenceJobMultiNode(userId int64, serverNum int) string { | |||||
| if serverNum == 1 { | |||||
| return "" | return "" | ||||
| } | } | ||||
| @@ -1404,8 +1431,8 @@ func checkInferenceJobMultiNode(userId int64, serverNum int) string{ | |||||
| } | } | ||||
| func isInNodes(nodes []int, num int) bool { | func isInNodes(nodes []int, num int) bool { | ||||
| for _, node:=range nodes{ | |||||
| if node==num{ | |||||
| for _, node := range nodes { | |||||
| if node == num { | |||||
| return true | return true | ||||
| } | } | ||||
| } | } | ||||
| @@ -1447,8 +1474,8 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ | |||||
| ctx.Data["PageIsTrainJob"] = true | ctx.Data["PageIsTrainJob"] = true | ||||
| var jobID = ctx.Params(":jobid") | var jobID = ctx.Params(":jobid") | ||||
| errStr:=checkMultiNode(ctx.User.ID,form.WorkServerNumber) | |||||
| if errStr!=""{ | |||||
| errStr := checkMultiNode(ctx.User.ID, form.WorkServerNumber) | |||||
| if errStr != "" { | |||||
| versionErrorDataPrepare(ctx, form) | versionErrorDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobVersionNew, &form) | ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobVersionNew, &form) | ||||
| return | return | ||||
| @@ -1483,7 +1510,6 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ | |||||
| workServerNumber := form.WorkServerNumber | workServerNumber := form.WorkServerNumber | ||||
| engineID := form.EngineID | engineID := form.EngineID | ||||
| bootFile := strings.TrimSpace(form.BootFile) | bootFile := strings.TrimSpace(form.BootFile) | ||||
| flavorCode := form.Flavor | |||||
| params := form.Params | params := form.Params | ||||
| poolID := form.PoolID | poolID := form.PoolID | ||||
| //isSaveParam := form.IsSaveParam | //isSaveParam := form.IsSaveParam | ||||
| @@ -1521,10 +1547,20 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ | |||||
| return | return | ||||
| } | } | ||||
| errStr = checkModelArtsSpecialPool(ctx, flavorCode, string(models.JobTypeTrain)) | |||||
| if errStr != "" { | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeTrain, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainTwo}) | |||||
| if err != nil || spec == nil { | |||||
| versionErrorDataPrepare(ctx, form) | versionErrorDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobVersionNew, &form) | |||||
| ctx.RenderWithErr("Resource specification not available", tplModelArtsTrainJobVersionNew, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID) | |||||
| versionErrorDataPrepare(ctx, form) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsTrainJobVersionNew, &form) | |||||
| return | return | ||||
| } | } | ||||
| @@ -1678,7 +1714,6 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ | |||||
| BootFileUrl: codeObsPath + bootFile, | BootFileUrl: codeObsPath + bootFile, | ||||
| BootFile: bootFile, | BootFile: bootFile, | ||||
| TrainUrl: outputObsPath, | TrainUrl: outputObsPath, | ||||
| FlavorCode: flavorCode, | |||||
| WorkServerNumber: workServerNumber, | WorkServerNumber: workServerNumber, | ||||
| IsLatestVersion: isLatestVersion, | IsLatestVersion: isLatestVersion, | ||||
| EngineID: int64(engineID), | EngineID: int64(engineID), | ||||
| @@ -1695,6 +1730,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ | |||||
| PreVersionName: PreVersionName, | PreVersionName: PreVersionName, | ||||
| TotalVersionCount: latestTask.TotalVersionCount + 1, | TotalVersionCount: latestTask.TotalVersionCount + 1, | ||||
| DatasetName: datasetNames, | DatasetName: datasetNames, | ||||
| Spec: spec, | |||||
| } | } | ||||
| userCommand, userImageUrl := getUserCommand(engineID, req) | userCommand, userImageUrl := getUserCommand(engineID, req) | ||||
| req.UserCommand = userCommand | req.UserCommand = userCommand | ||||
| @@ -1789,7 +1825,7 @@ func paramCheckCreateTrainJob(form auth.CreateModelArtsTrainJobForm) error { | |||||
| log.Error("the boot file(%s) must be a python file", strings.TrimSpace(form.BootFile)) | log.Error("the boot file(%s) must be a python file", strings.TrimSpace(form.BootFile)) | ||||
| return errors.New("启动文件必须是python文件") | return errors.New("启动文件必须是python文件") | ||||
| } | } | ||||
| if form.BranchName == "" { | if form.BranchName == "" { | ||||
| log.Error("the branch must not be null!", form.BranchName) | log.Error("the branch must not be null!", form.BranchName) | ||||
| return errors.New("代码分支不能为空!") | return errors.New("代码分支不能为空!") | ||||
| @@ -1878,7 +1914,6 @@ func TrainJobShow(ctx *context.Context) { | |||||
| for i, task := range VersionListTasks { | for i, task := range VersionListTasks { | ||||
| var parameters models.Parameters | var parameters models.Parameters | ||||
| err := json.Unmarshal([]byte(VersionListTasks[i].Parameters), ¶meters) | err := json.Unmarshal([]byte(VersionListTasks[i].Parameters), ¶meters) | ||||
| if err != nil { | if err != nil { | ||||
| log.Error("Failed to Unmarshal Parameters: %s (%v)", VersionListTasks[i].Parameters, err) | log.Error("Failed to Unmarshal Parameters: %s (%v)", VersionListTasks[i].Parameters, err) | ||||
| @@ -1899,6 +1934,14 @@ func TrainJobShow(ctx *context.Context) { | |||||
| datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false)) | datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false)) | ||||
| VersionListTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) | VersionListTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) | ||||
| VersionListTasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain) | VersionListTasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain) | ||||
| // attach the spec snapshot recorded for this version so the page can display it | |||||
| s, err := resource.GetCloudbrainSpec(task.Cloudbrain.ID) | |||||
| if err != nil { | |||||
| log.Error("TrainJobShow GetCloudbrainSpec error:" + err.Error()) | |||||
| continue | |||||
| } | |||||
| VersionListTasks[i].Cloudbrain.Spec = s | |||||
| } | } | ||||
| pager := context.NewPagination(VersionListCount, setting.UI.IssuePagingNum, page, 5) | pager := context.NewPagination(VersionListCount, setting.UI.IssuePagingNum, page, 5) | ||||
| @@ -1985,7 +2028,6 @@ func TrainJobStop(ctx *context.Context) { | |||||
| ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobIndex, nil) | ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobIndex, nil) | ||||
| return | return | ||||
| } | } | ||||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job?listType=" + listType) | ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job?listType=" + listType) | ||||
| } | } | ||||
| @@ -2066,7 +2108,6 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference | |||||
| workServerNumber := form.WorkServerNumber | workServerNumber := form.WorkServerNumber | ||||
| engineID := form.EngineID | engineID := form.EngineID | ||||
| bootFile := strings.TrimSpace(form.BootFile) | bootFile := strings.TrimSpace(form.BootFile) | ||||
| flavorCode := form.Flavor | |||||
| params := form.Params | params := form.Params | ||||
| poolID := form.PoolID | poolID := form.PoolID | ||||
| repo := ctx.Repo.Repository | repo := ctx.Repo.Repository | ||||
| @@ -2088,8 +2129,8 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference | |||||
| ckptUrl := "/" + form.TrainUrl + form.CkptName | ckptUrl := "/" + form.TrainUrl + form.CkptName | ||||
| log.Info("ckpt url:" + ckptUrl) | log.Info("ckpt url:" + ckptUrl) | ||||
| errStr:=checkInferenceJobMultiNode(ctx.User.ID,form.WorkServerNumber) | |||||
| if errStr!=""{ | |||||
| errStr := checkInferenceJobMultiNode(ctx.User.ID, form.WorkServerNumber) | |||||
| if errStr != "" { | |||||
| inferenceJobErrorNewDataPrepare(ctx, form) | inferenceJobErrorNewDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsInferenceJobNew, &form) | ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsInferenceJobNew, &form) | ||||
| return | return | ||||
| @@ -2143,10 +2184,20 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference | |||||
| } | } | ||||
| } | } | ||||
| errStr = checkModelArtsSpecialPool(ctx, flavorCode, string(models.JobTypeInference)) | |||||
| if errStr != "" { | |||||
| spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeInference, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainTwo}) | |||||
| if err != nil || spec == nil { | |||||
| inferenceJobErrorNewDataPrepare(ctx, form) | inferenceJobErrorNewDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsInferenceJobNew, &form) | |||||
| ctx.RenderWithErr("Resource specification not available", tplModelArtsInferenceJobNew, &form) | |||||
| return | |||||
| } | |||||
| if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) { | |||||
| log.Error("point balance is not enough,userId=%d specId=%d ", ctx.User.ID, spec.ID) | |||||
| inferenceJobErrorNewDataPrepare(ctx, form) | |||||
| ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsInferenceJobNew, &form) | |||||
| return | return | ||||
| } | } | ||||
| @@ -2201,7 +2252,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference | |||||
| datasUrlList, dataUrl, datasetNames, isMultiDataset, err := getDatasUrlListByUUIDS(uuid) | datasUrlList, dataUrl, datasetNames, isMultiDataset, err := getDatasUrlListByUUIDS(uuid) | ||||
| if err != nil { | if err != nil { | ||||
| inferenceJobErrorNewDataPrepare(ctx, form) | inferenceJobErrorNewDataPrepare(ctx, form) | ||||
| ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsInferenceJobNew, &form) | |||||
| ctx.RenderWithErr(err.Error(), tplModelArtsInferenceJobNew, &form) | |||||
| return | return | ||||
| } | } | ||||
| dataPath := dataUrl | dataPath := dataUrl | ||||
| @@ -2257,7 +2308,6 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference | |||||
| BootFileUrl: codeObsPath + bootFile, | BootFileUrl: codeObsPath + bootFile, | ||||
| BootFile: bootFile, | BootFile: bootFile, | ||||
| TrainUrl: trainUrl, | TrainUrl: trainUrl, | ||||
| FlavorCode: flavorCode, | |||||
| WorkServerNumber: workServerNumber, | WorkServerNumber: workServerNumber, | ||||
| EngineID: int64(engineID), | EngineID: int64(engineID), | ||||
| LogUrl: logObsPath, | LogUrl: logObsPath, | ||||
| @@ -2277,6 +2327,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference | |||||
| ModelVersion: modelVersion, | ModelVersion: modelVersion, | ||||
| CkptName: ckptName, | CkptName: ckptName, | ||||
| ResultUrl: resultObsPath, | ResultUrl: resultObsPath, | ||||
| Spec: spec, | |||||
| DatasetName: datasetNames, | DatasetName: datasetNames, | ||||
| } | } | ||||
| @@ -2319,7 +2370,7 @@ func checkModelArtsSpecialPool(ctx *context.Context, flavorCode string, jobType | |||||
| if !isMatchPool { | if !isMatchPool { | ||||
| isMatchSpec := false | isMatchSpec := false | ||||
| if jobType == string(models.JobTypeDebug) { | if jobType == string(models.JobTypeDebug) { | ||||
| for _, flavor := range modelarts.FlavorInfos.FlavorInfo { | |||||
| for _, flavor := range setting.StFlavorInfo.FlavorInfo { | |||||
| if flavor.Value == flavorCode { | if flavor.Value == flavorCode { | ||||
| isMatchSpec = true | isMatchSpec = true | ||||
| break | break | ||||
| @@ -2457,14 +2508,7 @@ func inferenceJobNewDataPrepare(ctx *context.Context) error { | |||||
| } | } | ||||
| ctx.Data["engine_versions"] = versionInfos.Version | ctx.Data["engine_versions"] = versionInfos.Version | ||||
| var flavorInfos modelarts.Flavor | |||||
| if err = json.Unmarshal([]byte(setting.TrainJobFLAVORINFOS), &flavorInfos); err != nil { | |||||
| ctx.ServerError("json.Unmarshal failed:", err) | |||||
| return err | |||||
| } | |||||
| ctx.Data["flavor_infos"] = flavorInfos.Info | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeInference)) | |||||
| prepareCloudbrainTwoInferenceSpecs(ctx) | |||||
| ctx.Data["params"] = "" | ctx.Data["params"] = "" | ||||
| ctx.Data["branchName"] = ctx.Repo.BranchName | ctx.Data["branchName"] = ctx.Repo.BranchName | ||||
| @@ -2495,6 +2539,16 @@ func inferenceJobNewDataPrepare(ctx *context.Context) error { | |||||
| return nil | return nil | ||||
| } | } | ||||
| func prepareCloudbrainTwoInferenceSpecs(ctx *context.Context) { | |||||
| noteBookSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{ | |||||
| JobType: models.JobTypeInference, | |||||
| ComputeResource: models.NPU, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainTwo, | |||||
| }) | |||||
| ctx.Data["Specs"] = noteBookSpecs | |||||
| } | |||||
| func inferenceJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArtsInferenceJobForm) error { | func inferenceJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArtsInferenceJobForm) error { | ||||
| ctx.Data["PageIsCloudBrain"] = true | ctx.Data["PageIsCloudBrain"] = true | ||||
| @@ -2529,14 +2583,7 @@ func inferenceJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModel | |||||
| return err | return err | ||||
| } | } | ||||
| ctx.Data["engine_versions"] = versionInfos.Version | ctx.Data["engine_versions"] = versionInfos.Version | ||||
| var flavorInfos modelarts.Flavor | |||||
| if err = json.Unmarshal([]byte(setting.TrainJobFLAVORINFOS), &flavorInfos); err != nil { | |||||
| ctx.ServerError("json.Unmarshal failed:", err) | |||||
| return err | |||||
| } | |||||
| ctx.Data["flavor_infos"] = flavorInfos.Info | |||||
| setSpecBySpecialPoolConfig(ctx, string(models.JobTypeInference)) | |||||
| prepareCloudbrainTwoInferenceSpecs(ctx) | |||||
| configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom) | configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -2611,7 +2658,7 @@ func InferenceJobShow(ctx *context.Context) { | |||||
| } else { | } else { | ||||
| task.Parameters = "" | task.Parameters = "" | ||||
| } | } | ||||
| prepareSpec4Show(ctx, task) | |||||
| LabelName := strings.Fields(task.LabelName) | LabelName := strings.Fields(task.LabelName) | ||||
| ctx.Data["labelName"] = LabelName | ctx.Data["labelName"] = LabelName | ||||
| ctx.Data["jobID"] = jobID | ctx.Data["jobID"] = jobID | ||||
| @@ -2,3 +2,4 @@ package response | |||||
| var RESOURCE_QUEUE_NOT_AVAILABLE = &BizError{Code: 1001, Err: "resource queue not available"} | var RESOURCE_QUEUE_NOT_AVAILABLE = &BizError{Code: 1001, Err: "resource queue not available"} | ||||
| var SPECIFICATION_NOT_EXIST = &BizError{Code: 1002, Err: "specification not exist"} | var SPECIFICATION_NOT_EXIST = &BizError{Code: 1002, Err: "specification not exist"} | ||||
| var SPECIFICATION_NOT_AVAILABLE = &BizError{Code: 1003, Err: "specification not available"} | |||||
| @@ -0,0 +1,24 @@ | |||||
| package point | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/context" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/routers/response" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "net/http" | |||||
| ) | |||||
| func SearchPointAccount(ctx *context.Context) { | |||||
| q := ctx.Query("q") | |||||
| page := ctx.QueryInt("page") | |||||
| res, err := account.SearchPointAccount(models.SearchPointAccountOpts{ListOptions: models.ListOptions{Page: page, PageSize: 20}, Keyword: q}) | |||||
| if err != nil { | |||||
| log.Error("SearchPointAccount error.%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(res)) | |||||
| return | |||||
| } | |||||
| @@ -0,0 +1,45 @@ | |||||
| package point | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/context" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/routers/response" | |||||
| "code.gitea.io/gitea/services/reward/limiter" | |||||
| "net/http" | |||||
| ) | |||||
| func GetSingleDailyPointLimitConfig(ctx *context.Context) { | |||||
| r, err := limiter.GetSingleDailyPointLimitConfig() | |||||
| if err != nil { | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| resultMap := make(map[string]interface{}, 0) | |||||
| if r == nil { | |||||
| resultMap["LimitNum"] = "" | |||||
| } else { | |||||
| resultMap["LimitNum"] = r.LimitNum | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(resultMap)) | |||||
| } | |||||
| func SetSingleDailyPointLimitConfig(ctx *context.Context, config models.LimitConfigVO) { | |||||
| err := limiter.SetSingleDailyPointLimitConfig(config.LimitNum, ctx.User) | |||||
| if err != nil { | |||||
| log.Error("Set single daily point limit config error. %v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.Success()) | |||||
| } | |||||
| func DeletePointLimitConfig(ctx *context.Context) { | |||||
| id := ctx.QueryInt64("id") | |||||
| err := limiter.DeleteLimitConfig(id, ctx.User) | |||||
| if err != nil { | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.Success()) | |||||
| } | |||||
| @@ -0,0 +1,158 @@ | |||||
| package point | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/base" | |||||
| "code.gitea.io/gitea/modules/context" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/routers/response" | |||||
| "code.gitea.io/gitea/services/reward" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "errors" | |||||
| "net/http" | |||||
| ) | |||||
| const tplPoint base.TplName = "reward/point" | |||||
| const tplPointRule base.TplName = "reward/point/rule" | |||||
| type AccountResponse struct { | |||||
| Balance int64 | |||||
| TotalEarned int64 | |||||
| TotalConsumed int64 | |||||
| } | |||||
| func GetPointAccount(ctx *context.Context) { | |||||
| userId := ctx.User.ID | |||||
| a, err := account.GetAccount(userId) | |||||
| if err != nil { | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| res := &AccountResponse{ | |||||
| Balance: a.Balance, | |||||
| TotalEarned: a.TotalEarned, | |||||
| TotalConsumed: a.TotalConsumed, | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(res)) | |||||
| } | |||||
| func GetPointRecordList(ctx *context.Context) { | |||||
| operateType := ctx.Query("Operate") | |||||
| page := ctx.QueryInt("Page") | |||||
| var orderBy models.RewardOperateOrderBy | |||||
| switch ctx.Query("sort") { | |||||
| default: | |||||
| orderBy = models.RewardOrderByIDDesc | |||||
| } | |||||
| t := models.GetRewardOperateTypeInstance(operateType) | |||||
| if t == "" { | |||||
| ctx.JSON(http.StatusOK, response.ServerError("param error")) | |||||
| return | |||||
| } | |||||
| r, err := reward.GetRewardRecordList(&models.RewardRecordListOpts{ | |||||
| ListOptions: models.ListOptions{PageSize: 10, Page: page}, | |||||
| UserId: ctx.User.ID, | |||||
| OperateType: t, | |||||
| RewardType: models.RewardTypePoint, | |||||
| OrderBy: orderBy, | |||||
| IsAdmin: false, | |||||
| UserName: ctx.User.Name, | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("GetPointRecordList error.%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(r)) | |||||
| return | |||||
| } | |||||
| func OperatePointAccountBalance(ctx *context.Context, req models.AdminRewardOperateReq) { | |||||
| req.RewardType = models.RewardTypePoint | |||||
| if req.OperateType.Name() == "" || req.Remark == "" { | |||||
| ctx.JSON(http.StatusOK, "param error") | |||||
| return | |||||
| } | |||||
| err := reward.AdminBalanceOperate(req, ctx.User) | |||||
| if err != nil { | |||||
| log.Error("OperatePointAccountBalance error.%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.Success()) | |||||
| } | |||||
| func GetPointPage(ctx *context.Context) { | |||||
| ctx.HTML(200, tplPoint) | |||||
| } | |||||
| func GetRulePage(ctx *context.Context) { | |||||
| ctx.HTML(200, tplPointRule) | |||||
| } | |||||
| func GetAdminRewardList(ctx *context.Context) { | |||||
| opts, err := buildAdminRewardRecordListOpts(ctx) | |||||
| if err != nil { | |||||
| log.Error("buildAdminRewardRecordListOpts error.%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| username := ctx.Query("userName") | |||||
| if username != "" { | |||||
| user, err := models.GetUserByName(username) | |||||
| if err != nil { | |||||
| log.Error("GetUserByName error.%v", err) | |||||
| if models.IsErrUserNotExist(err) { | |||||
| ctx.JSON(http.StatusOK, response.ServerError("user not exist")) | |||||
| } else { | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| } | |||||
| return | |||||
| } | |||||
| opts.UserId = user.ID | |||||
| opts.UserName = user.Name | |||||
| } | |||||
| r, err := reward.GetRewardRecordList(opts) | |||||
| if err != nil { | |||||
| log.Error("GetRewardRecordList error.%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(r)) | |||||
| } | |||||
| func buildAdminRewardRecordListOpts(ctx *context.Context) (*models.RewardRecordListOpts, error) { | |||||
| operateType := ctx.Query("operate") | |||||
| sourceType := ctx.Query("source") | |||||
| actionType := ctx.QueryInt("action") | |||||
| serialNo := ctx.Query("serialNo") | |||||
| status := ctx.Query("status") | |||||
| page := ctx.QueryInt("page") | |||||
| var orderBy models.RewardOperateOrderBy | |||||
| switch ctx.Query("sort") { | |||||
| default: | |||||
| orderBy = models.RewardOrderByIDDesc | |||||
| } | |||||
| t := models.GetRewardOperateTypeInstance(operateType) | |||||
| if t == "" { | |||||
| return nil, errors.New("param error") | |||||
| } | |||||
| opts := &models.RewardRecordListOpts{ | |||||
| ListOptions: models.ListOptions{PageSize: 10, Page: page}, | |||||
| OperateType: t, | |||||
| RewardType: models.RewardTypePoint, | |||||
| OrderBy: orderBy, | |||||
| SourceType: sourceType, | |||||
| ActionType: actionType, | |||||
| SerialNo: serialNo, | |||||
| IsAdmin: true, | |||||
| Status: status, | |||||
| } | |||||
| return opts, nil | |||||
| } | |||||
| @@ -6,6 +6,9 @@ package routes | |||||
| import ( | import ( | ||||
| "bytes" | "bytes" | ||||
| "code.gitea.io/gitea/routers/reward/point" | |||||
| "code.gitea.io/gitea/routers/task" | |||||
| "code.gitea.io/gitea/services/reward" | |||||
| "encoding/gob" | "encoding/gob" | ||||
| "net/http" | "net/http" | ||||
| "path" | "path" | ||||
| @@ -328,6 +331,8 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/", routers.Home) | m.Get("/", routers.Home) | ||||
| m.Get("/dashboard", routers.Dashboard) | m.Get("/dashboard", routers.Dashboard) | ||||
| go routers.SocketManager.Run() | go routers.SocketManager.Run() | ||||
| go task.RunTask() | |||||
| go reward.AcceptStatusChangeAction() | |||||
| m.Get("/action/notification", routers.ActionNotification) | m.Get("/action/notification", routers.ActionNotification) | ||||
| m.Get("/recommend/home", routers.RecommendHomeInfo) | m.Get("/recommend/home", routers.RecommendHomeInfo) | ||||
| m.Get("/dashboard/invitation", routers.GetMapInfo) | m.Get("/dashboard/invitation", routers.GetMapInfo) | ||||
| @@ -643,6 +648,20 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Group("/operation", func() { | m.Group("/operation", func() { | ||||
| m.Get("/config/recommend_org", operation.Organizations) | m.Get("/config/recommend_org", operation.Organizations) | ||||
| m.Post("/config/recommend_org", bindIgnErr(operation.OrgInfos{}), operation.UpdateRecommendOrganizations) | m.Post("/config/recommend_org", bindIgnErr(operation.OrgInfos{}), operation.UpdateRecommendOrganizations) | ||||
| m.Group("/reward/point", func() { | |||||
| m.Combo("/limiter/single-daily").Get(point.GetSingleDailyPointLimitConfig).Post(bindIgnErr(models.LimitConfigVO{}), point.SetSingleDailyPointLimitConfig) | |||||
| m.Post("/limiter/delete", point.DeletePointLimitConfig) | |||||
| m.Get("/account/search", point.SearchPointAccount) | |||||
| m.Post("/account/operate", binding.Bind(models.AdminRewardOperateReq{}), point.OperatePointAccountBalance) | |||||
| m.Get("/list", point.GetAdminRewardList) | |||||
| }) | |||||
| m.Group("/task/config", func() { | |||||
| m.Get("/list", task.GetTaskConfigList) | |||||
| m.Post("/add/batch", bindIgnErr(models.BatchLimitConfigVO{}), task.BatchAddTaskConfig) | |||||
| m.Post("/^:action(new|edit|del)$", bindIgnErr(models.TaskConfigWithLimit{}), task.OperateTaskConfig) | |||||
| }) | |||||
| }, operationReq) | }, operationReq) | ||||
| // ***** END: Operation ***** | // ***** END: Operation ***** | ||||
| @@ -1116,7 +1135,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/models", reqRepoCloudBrainReader, repo.CloudBrainShowModels) | m.Get("/models", reqRepoCloudBrainReader, repo.CloudBrainShowModels) | ||||
| m.Get("/download_model", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDownloadModel) | m.Get("/download_model", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDownloadModel) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.CloudBrainNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.CloudBrainNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) | ||||
| m.Group("/benchmark", func() { | m.Group("/benchmark", func() { | ||||
| @@ -1127,7 +1146,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.BenchmarkDel) | m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.BenchmarkDel) | ||||
| m.Get("/rate", reqRepoCloudBrainReader, repo.GetRate) | m.Get("/rate", reqRepoCloudBrainReader, repo.GetRate) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.CloudBrainBenchmarkNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.CloudBrainBenchmarkNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainBenchmarkCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainBenchmarkCreate) | ||||
| m.Get("/get_child_types", repo.GetChildTypes) | m.Get("/get_child_types", repo.GetChildTypes) | ||||
| }) | }) | ||||
| @@ -1141,7 +1160,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| //m.Get("/get_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo.GetLogFromModelDir) | //m.Get("/get_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo.GetLogFromModelDir) | ||||
| //m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) | //m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.CloudBrainTrainJobNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.CloudBrainTrainJobNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) | ||||
| }) | }) | ||||
| m.Group("/inference-job", func() { | m.Group("/inference-job", func() { | ||||
| @@ -1151,7 +1170,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/downloadall", repo.DownloadInferenceResultFile) | m.Get("/downloadall", repo.DownloadInferenceResultFile) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.InferenceCloudBrainJobNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.InferenceCloudBrainJobNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainInferencForm{}), repo.CloudBrainInferenceJobCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainInferencForm{}), repo.CloudBrainInferenceJobCreate) | ||||
| }) | }) | ||||
| }, context.RepoRef()) | }, context.RepoRef()) | ||||
| @@ -1164,11 +1183,11 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/model_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ModelDownload) | m.Get("/model_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ModelDownload) | ||||
| }) | }) | ||||
| m.Group("/gpu", func() { | m.Group("/gpu", func() { | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.GrampusTrainJobGPUNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.GrampusTrainJobGPUNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobGpuCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobGpuCreate) | ||||
| }) | }) | ||||
| m.Group("/npu", func() { | m.Group("/npu", func() { | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.GrampusTrainJobNPUNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.GrampusTrainJobNPUNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobNpuCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobNpuCreate) | ||||
| }) | }) | ||||
| }) | }) | ||||
| @@ -1225,7 +1244,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookStop) | m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookStop) | ||||
| m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookDel) | m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookDel) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.NotebookNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.NotebookNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsNotebookForm{}), repo.Notebook2Create) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsNotebookForm{}), repo.Notebook2Create) | ||||
| }) | }) | ||||
| @@ -1237,10 +1256,10 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo.TrainJobDel) | m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo.TrainJobDel) | ||||
| m.Get("/model_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ModelDownload) | m.Get("/model_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ModelDownload) | ||||
| m.Get("/download_log_file", cloudbrain.AdminOrJobCreaterRightForTrain, repo.TrainJobDownloadLogFile) | m.Get("/download_log_file", cloudbrain.AdminOrJobCreaterRightForTrain, repo.TrainJobDownloadLogFile) | ||||
| m.Get("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, repo.TrainJobNewVersion) | |||||
| m.Get("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, context.PointAccount(), repo.TrainJobNewVersion) | |||||
| m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) | m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.TrainJobNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.TrainJobNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreate) | ||||
| m.Get("/para-config-list", reqRepoCloudBrainReader, repo.TrainJobGetConfigList) | m.Get("/para-config-list", reqRepoCloudBrainReader, repo.TrainJobGetConfigList) | ||||
| @@ -1253,7 +1272,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/result_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ResultDownload) | m.Get("/result_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ResultDownload) | ||||
| m.Get("/downloadall", repo.DownloadMultiResultFile) | m.Get("/downloadall", repo.DownloadMultiResultFile) | ||||
| }) | }) | ||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.InferenceJobNew) | |||||
| m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.InferenceJobNew) | |||||
| m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsInferenceJobForm{}), repo.InferenceJobCreate) | m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsInferenceJobForm{}), repo.InferenceJobCreate) | ||||
| }) | }) | ||||
| }, context.RepoRef()) | }, context.RepoRef()) | ||||
| @@ -1413,6 +1432,13 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Post("/purge", user.NotificationPurgePost) | m.Post("/purge", user.NotificationPurgePost) | ||||
| }, reqSignIn) | }, reqSignIn) | ||||
| m.Group("/reward/point", func() { | |||||
| m.Get("", point.GetPointPage) | |||||
| m.Get("/rule", point.GetRulePage) | |||||
| m.Get("/account", point.GetPointAccount) | |||||
| m.Get("/record/list", point.GetPointRecordList) | |||||
| }, reqSignIn) | |||||
| if setting.API.EnableSwagger { | if setting.API.EnableSwagger { | ||||
| m.Get("/swagger.v1.json", templates.JSONRenderer(), routers.SwaggerV1Json) | m.Get("/swagger.v1.json", templates.JSONRenderer(), routers.SwaggerV1Json) | ||||
| } | } | ||||
| @@ -0,0 +1,68 @@ | |||||
| package task | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/context" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/routers/response" | |||||
| "code.gitea.io/gitea/services/task" | |||||
| "errors" | |||||
| "net/http" | |||||
| ) | |||||
| func GetTaskConfigList(ctx *context.Context) { | |||||
| page := ctx.QueryInt("Page") | |||||
| status := ctx.QueryInt("Status") | |||||
| action := ctx.QueryInt("Action") | |||||
| r, err := task.GetTaskConfigWithLimitList(models.GetTaskConfigOpts{ | |||||
| ListOptions: models.ListOptions{PageSize: 20, Page: page}, | |||||
| Status: status, | |||||
| ActionType: action, | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("GetTaskConfigList error.%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(r)) | |||||
| } | |||||
| func OperateTaskConfig(ctx *context.Context, config models.TaskConfigWithLimit) { | |||||
| action := ctx.Params(":action") | |||||
| var err error | |||||
| switch action { | |||||
| case "edit": | |||||
| err = task.EditTaskConfig(config, ctx.User) | |||||
| case "new": | |||||
| err = task.AddTaskConfig(config, ctx.User) | |||||
| case "del": | |||||
| err = task.DelTaskConfig(config.ID, ctx.User) | |||||
| default: | |||||
| err = errors.New("action type error") | |||||
| } | |||||
| if err != nil { | |||||
| log.Error("OperateTaskConfig error ,%v", err) | |||||
| ctx.JSON(http.StatusOK, response.ServerError(err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, response.Success()) | |||||
| } | |||||
| func BatchAddTaskConfig(ctx *context.Context, list models.BatchLimitConfigVO) { | |||||
| successCount := 0 | |||||
| failCount := 0 | |||||
| for _, config := range list.ConfigList { | |||||
| err := task.AddTaskConfig(config, ctx.User) | |||||
| if err != nil { | |||||
| failCount++ | |||||
| } else { | |||||
| successCount++ | |||||
| } | |||||
| } | |||||
| r := make(map[string]int, 2) | |||||
| r["successCount"] = successCount | |||||
| r["failCount"] = failCount | |||||
| log.Debug("BatchAddTaskConfig success.result=%v", r) | |||||
| ctx.JSON(http.StatusOK, response.SuccessWithData(r)) | |||||
| } | |||||
| @@ -0,0 +1,15 @@ | |||||
| package task | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/services/task" | |||||
| ) | |||||
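| // RunTask blocks forever, reading user actions from models.ActionChan4Task and handing each one to task.Accomplish; RegisterRoutes starts it once with "go task.RunTask()". | |||||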
| func RunTask() { | |||||
| for { | |||||
| select { | |||||
| case action := <-models.ActionChan4Task: | |||||
| task.Accomplish(action) | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -836,14 +836,12 @@ func Cloudbrains(ctx *context.Context) { | |||||
| ctx.ServerError("Get job failed:", err) | ctx.ServerError("Get job failed:", err) | ||||
| return | return | ||||
| } | } | ||||
| models.LoadSpecs4CloudbrainInfo(ciTasks) | |||||
| for i, task := range ciTasks { | for i, task := range ciTasks { | ||||
| ciTasks[i].CanDebug = true | ciTasks[i].CanDebug = true | ||||
| ciTasks[i].CanDel = true | ciTasks[i].CanDel = true | ||||
| ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource | ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource | ||||
| ciTasks[i].Cloudbrain.AiCenter = repo.GetCloudbrainAiCenter(task.Cloudbrain, ctx) | ciTasks[i].Cloudbrain.AiCenter = repo.GetCloudbrainAiCenter(task.Cloudbrain, ctx) | ||||
| _, cardType, _ := repo.GetCloudbrainCardNumAndType(task.Cloudbrain) | |||||
| ciTasks[i].Cloudbrain.CardType = cardType | |||||
| ciTasks[i].Cloudbrain.Cluster = repo.GetCloudbrainCluster(task.Cloudbrain, ctx) | ciTasks[i].Cloudbrain.Cluster = repo.GetCloudbrainCluster(task.Cloudbrain, ctx) | ||||
| } | } | ||||
| @@ -6,6 +6,7 @@ | |||||
| package setting | package setting | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/notification" | |||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| "io/ioutil" | "io/ioutil" | ||||
| @@ -179,6 +180,7 @@ func AvatarPost(ctx *context.Context, form auth.AvatarForm) { | |||||
| if err := UpdateAvatarSetting(ctx, form, ctx.User); err != nil { | if err := UpdateAvatarSetting(ctx, form, ctx.User); err != nil { | ||||
| ctx.Flash.Error(err.Error()) | ctx.Flash.Error(err.Error()) | ||||
| } else { | } else { | ||||
| notification.NotifyChangeUserAvatar(ctx.User, form) | |||||
| ctx.Flash.Success(ctx.Tr("settings.update_avatar_success")) | ctx.Flash.Success(ctx.Tr("settings.update_avatar_success")) | ||||
| } | } | ||||
| @@ -2,12 +2,19 @@ package resource | |||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
| "code.gitea.io/gitea/modules/cloudbrain" | |||||
| "code.gitea.io/gitea/modules/grampus" | "code.gitea.io/gitea/modules/grampus" | ||||
| "code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
| "code.gitea.io/gitea/modules/modelarts" | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "code.gitea.io/gitea/routers/response" | "code.gitea.io/gitea/routers/response" | ||||
| "code.gitea.io/gitea/services/admin/operate_log" | "code.gitea.io/gitea/services/admin/operate_log" | ||||
| "encoding/json" | |||||
| "errors" | |||||
| "fmt" | "fmt" | ||||
| "strconv" | |||||
| "strings" | "strings" | ||||
| "time" | |||||
| ) | ) | ||||
| func AddResourceSpecification(doerId int64, req models.ResourceSpecificationReq) error { | func AddResourceSpecification(doerId int64, req models.ResourceSpecificationReq) error { | ||||
| @@ -92,6 +99,7 @@ func SyncGrampusSpecs(doerId int64) error { | |||||
| GPUMemGiB: gpuMemGiB, | GPUMemGiB: gpuMemGiB, | ||||
| Status: models.SpecNotVerified, | Status: models.SpecNotVerified, | ||||
| IsAutomaticSync: true, | IsAutomaticSync: true, | ||||
| IsAvailable: true, | |||||
| CreatedBy: doerId, | CreatedBy: doerId, | ||||
| UpdatedBy: doerId, | UpdatedBy: doerId, | ||||
| }) | }) | ||||
| @@ -103,6 +111,7 @@ func SyncGrampusSpecs(doerId int64) error { | |||||
| CpuCores: spec.SpecInfo.CpuCoreNum, | CpuCores: spec.SpecInfo.CpuCoreNum, | ||||
| MemGiB: memGiB, | MemGiB: memGiB, | ||||
| GPUMemGiB: gpuMemGiB, | GPUMemGiB: gpuMemGiB, | ||||
| IsAvailable: true, | |||||
| UpdatedBy: doerId, | UpdatedBy: doerId, | ||||
| }) | }) | ||||
| } | } | ||||
| @@ -142,7 +151,9 @@ func ResourceSpecOnShelf(doerId int64, id int64, unitPrice int) *response.BizErr | |||||
| if q, err := models.GetResourceQueue(&models.ResourceQueue{ID: spec.QueueId}); err != nil || q == nil { | if q, err := models.GetResourceQueue(&models.ResourceQueue{ID: spec.QueueId}); err != nil || q == nil { | ||||
| return response.RESOURCE_QUEUE_NOT_AVAILABLE | return response.RESOURCE_QUEUE_NOT_AVAILABLE | ||||
| } | } | ||||
| if !spec.IsAvailable { | |||||
| return response.SPECIFICATION_NOT_AVAILABLE | |||||
| } | |||||
| err = models.ResourceSpecOnShelf(id, unitPrice) | err = models.ResourceSpecOnShelf(id, unitPrice) | ||||
| if err != nil { | if err != nil { | ||||
| return response.NewBizError(err) | return response.NewBizError(err) | ||||
| @@ -184,3 +195,461 @@ func AddSpecOperateLog(doerId int64, operateType string, newValue, oldValue *mod | |||||
| Comment: comment, | Comment: comment, | ||||
| }) | }) | ||||
| } | } | ||||
| func FindAvailableSpecs(userId int64, opts models.FindSpecsOptions) ([]*models.Specification, error) { | |||||
| r, err := models.FindSpecs(opts) | |||||
| if err != nil { | |||||
| log.Error("FindAvailableSpecs error.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| //filter exclusive specs | |||||
| specs := filterExclusiveSpecs(r, userId) | |||||
| //distinct by sourceSpecId | |||||
| specs = distinctSpecs(specs) | |||||
| return specs, err | |||||
| } | |||||
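| // filterExclusiveSpecs keeps every non-exclusive spec and, for exclusive ones, only those whose semicolon-separated ExclusiveOrg list contains an organization the user is a member of. | |||||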
| func filterExclusiveSpecs(r []*models.Specification, userId int64) []*models.Specification { | |||||
| specs := make([]*models.Specification, 0, len(r)) | |||||
| specMap := make(map[int64]string, 0) | |||||
| for i := 0; i < len(r); i++ { | |||||
| spec := r[i] | |||||
| if _, has := specMap[spec.ID]; has { | |||||
| continue | |||||
| } | |||||
| if !spec.IsExclusive { | |||||
| specs = append(specs, spec) | |||||
| specMap[spec.ID] = "" | |||||
| continue | |||||
| } | |||||
| orgs := strings.Split(spec.ExclusiveOrg, ";") | |||||
| for _, org := range orgs { | |||||
| isMember, _ := models.IsOrganizationMemberByOrgName(org, userId) | |||||
| if isMember { | |||||
| specs = append(specs, spec) | |||||
| specMap[spec.ID] = "" | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| return specs | |||||
| } | |||||
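| // distinctSpecs drops duplicates that share the same SourceSpecId, keeping only the first occurrence; specs without a SourceSpecId are always kept. | |||||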
| func distinctSpecs(r []*models.Specification) []*models.Specification { | |||||
| specs := make([]*models.Specification, 0, len(r)) | |||||
| sourceSpecIdMap := make(map[string]string, 0) | |||||
| for i := 0; i < len(r); i++ { | |||||
| spec := r[i] | |||||
| if spec.SourceSpecId == "" { | |||||
| specs = append(specs, spec) | |||||
| continue | |||||
| } | |||||
| if _, has := sourceSpecIdMap[spec.SourceSpecId]; has { | |||||
| continue | |||||
| } | |||||
| specs = append(specs, spec) | |||||
| sourceSpecIdMap[spec.SourceSpecId] = "" | |||||
| } | |||||
| return specs | |||||
| } | |||||
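| // GetAndCheckSpec returns the requested spec only if it is visible to the user under the given filter options; it returns (nil, nil) when specId is 0 or no matching spec is found, so callers must check for a nil result. | |||||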
| func GetAndCheckSpec(userId int64, specId int64, opts models.FindSpecsOptions) (*models.Specification, error) { | |||||
| if specId == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| opts.SpecId = specId | |||||
| r, err := FindAvailableSpecs(userId, opts) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if r == nil || len(r) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| return r[0], nil | |||||
| } | |||||
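| // InsertCloudbrainSpec stores a per-task snapshot of the chosen specification (cards, cpu, memory, price, queue, cluster) keyed by the cloudbrain id. | |||||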
| func InsertCloudbrainSpec(cloudbrainId int64, s *models.Specification) error { | |||||
| c := models.CloudbrainSpec{ | |||||
| CloudbrainID: cloudbrainId, | |||||
| SpecId: s.ID, | |||||
| SourceSpecId: s.SourceSpecId, | |||||
| AccCardsNum: s.AccCardsNum, | |||||
| AccCardType: s.AccCardType, | |||||
| CpuCores: s.CpuCores, | |||||
| MemGiB: s.MemGiB, | |||||
| GPUMemGiB: s.GPUMemGiB, | |||||
| ShareMemGiB: s.ShareMemGiB, | |||||
| ComputeResource: s.ComputeResource, | |||||
| UnitPrice: s.UnitPrice, | |||||
| QueueId: s.QueueId, | |||||
| QueueCode: s.QueueCode, | |||||
| Cluster: s.Cluster, | |||||
| AiCenterCode: s.AiCenterCode, | |||||
| AiCenterName: s.AiCenterName, | |||||
| IsExclusive: s.IsExclusive, | |||||
| ExclusiveOrg: s.ExclusiveOrg, | |||||
| } | |||||
| _, err := models.InsertCloudbrainSpec(c) | |||||
| if err != nil { | |||||
| log.Error("InsertCloudbrainSpec error.CloudbrainSpec=%v. err=%v", c, err) | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func GetCloudbrainSpec(cloudbrainId int64) (*models.Specification, error) { | |||||
| c, err := models.GetCloudbrainSpecByID(cloudbrainId) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if c == nil { | |||||
| return nil, nil | |||||
| } | |||||
| return c.ConvertToSpecification(), nil | |||||
| } | |||||
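| // RefreshHistorySpec backfills CloudbrainSpec snapshots for historic tasks: either for the given ids, or, when scopeAll is true, by paging through tasks without a snapshot (at most 500 pages of 100). It returns the number of candidate tasks and the number successfully backfilled. | |||||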
| func RefreshHistorySpec(scopeAll bool, ids []int64) (int64, int64, error) { | |||||
| var success int64 | |||||
| var total int64 | |||||
| if !scopeAll { | |||||
| if ids == nil || len(ids) == 0 { | |||||
| return 0, 0, nil | |||||
| } | |||||
| total = int64(len(ids)) | |||||
| tasks, err := models.GetCloudbrainWithDeletedByIDs(ids) | |||||
| if err != nil { | |||||
| return total, 0, err | |||||
| } | |||||
| for _, task := range tasks { | |||||
| err = RefreshOneHistorySpec(task) | |||||
| if err != nil { | |||||
| log.Error("RefreshOneHistorySpec error.%v", err) | |||||
| continue | |||||
| } | |||||
| success++ | |||||
| } | |||||
| } else { | |||||
| page := 1 | |||||
| pageSize := 100 | |||||
| n, err := models.CountNoSpecHistoricTask() | |||||
| if err != nil { | |||||
| log.Error("FindNoSpecHistoricTask CountNoSpecHistoricTask error. e=%v", err) | |||||
| return 0, 0, err | |||||
| } | |||||
| total = n | |||||
| for i := 0; i < 500; i++ { | |||||
| list, err := models.FindCloudbrainTask(page, pageSize) | |||||
| page++ | |||||
| if err != nil { | |||||
| log.Error("FindCloudbrainTask error.page=%d pageSize=%d e=%v", page, pageSize, err) | |||||
| return total, success, err | |||||
| } | |||||
| if len(list) == 0 { | |||||
| log.Info("RefreshHistorySpec. list is empty") | |||||
| break | |||||
| } | |||||
| for _, task := range list { | |||||
| s, err := GetCloudbrainSpec(task.ID) | |||||
| if err != nil { | |||||
| log.Error("RefreshHistorySpec GetCloudbrainSpec error.%v", err) | |||||
| continue | |||||
| } | |||||
| if s != nil { | |||||
| continue | |||||
| } | |||||
| err = RefreshOneHistorySpec(task) | |||||
| if err != nil { | |||||
| log.Error("RefreshOneHistorySpec error.%v", err) | |||||
| continue | |||||
| } | |||||
| success++ | |||||
| } | |||||
| if len(list) < pageSize { | |||||
| log.Info("RefreshHistorySpec. list < pageSize") | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| return total, success, nil | |||||
| } | |||||
| func RefreshOneHistorySpec(task *models.Cloudbrain) error { | |||||
| var spec *models.Specification | |||||
| var err error | |||||
| switch task.Type { | |||||
| case models.TypeCloudBrainOne: | |||||
| spec, err = getCloudbrainOneSpec(task) | |||||
| case models.TypeCloudBrainTwo: | |||||
| spec, err = getCloudbrainTwoSpec(task) | |||||
| case models.TypeC2Net: | |||||
| spec, err = getGrampusSpec(task) | |||||
| } | |||||
| if err != nil { | |||||
| log.Error("find spec error,task.ID=%d err=%v", task.ID, err) | |||||
| return err | |||||
| } | |||||
| if spec == nil { | |||||
| log.Error("find spec failed,task.ID=%d", task.ID) | |||||
| return errors.New("find spec failed") | |||||
| } | |||||
| return InsertCloudbrainSpec(task.ID, spec) | |||||
| } | |||||
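| // getCloudbrainOneSpec resolves the spec of a historic cloudbrain-one task, first from the local resource-spec configuration and, if that fails, from the remote job record. | |||||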
| func getCloudbrainOneSpec(task *models.Cloudbrain) (*models.Specification, error) { | |||||
| if task.GpuQueue == "" { | |||||
| log.Info("gpu queue is empty.task.ID = %d", task.ID) | |||||
| return nil, nil | |||||
| } | |||||
| //find from config | |||||
| spec, err := findCloudbrainOneSpecFromConfig(task) | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainOneSpec findCloudbrainOneSpecFromConfig error.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if spec != nil { | |||||
| return spec, nil | |||||
| } | |||||
| //find from remote | |||||
| return findCloudbrainOneSpecFromRemote(task) | |||||
| } | |||||
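| // findCloudbrainOneSpecFromRemote reads the job's actual resources (acc cards, cpu cores, memory, shared memory) from the cloudbrain-one API and matches them against existing specs; when nothing matches it creates an off-shelf queue and spec via InitQueueAndSpec. | |||||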
| func findCloudbrainOneSpecFromRemote(task *models.Cloudbrain) (*models.Specification, error) { | |||||
| time.Sleep(200 * time.Millisecond) | |||||
| log.Info("start findCloudbrainOneSpecFromRemote") | |||||
| result, err := cloudbrain.GetJob(task.JobID) | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainOneSpec error. %v", err) | |||||
| return nil, err | |||||
| } | |||||
| if result == nil { | |||||
| log.Info("findCloudbrainOneSpecFromRemote failed,result is empty.task.ID=%d", task.ID) | |||||
| return nil, nil | |||||
| } | |||||
| jobRes, _ := models.ConvertToJobResultPayload(result.Payload) | |||||
| memSize, _ := models.ParseMemSizeFromGrampus(jobRes.Resource.Memory) | |||||
| if task.ComputeResource == "CPU/GPU" { | |||||
| task.ComputeResource = models.GPU | |||||
| } | |||||
| var shmMB float32 | |||||
| if jobRes.Config.TaskRoles != nil && len(jobRes.Config.TaskRoles) > 0 { | |||||
| shmMB = float32(jobRes.Config.TaskRoles[0].ShmMB) / 1024 | |||||
| if jobRes.Config.TaskRoles[0].ShmMB == 103600 { | |||||
| shmMB = 100 | |||||
| } else if jobRes.Config.TaskRoles[0].ShmMB == 51800 { | |||||
| shmMB = 50 | |||||
| } | |||||
| } | |||||
| opt := models.FindSpecsOptions{ | |||||
| ComputeResource: task.ComputeResource, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne, | |||||
| QueueCode: task.GpuQueue, | |||||
| AccCardsNum: jobRes.Resource.NvidiaComGpu, | |||||
| UseAccCardsNum: true, | |||||
| CpuCores: jobRes.Resource.CPU, | |||||
| UseCpuCores: true, | |||||
| MemGiB: memSize, | |||||
| UseMemGiB: memSize > 0, | |||||
| ShareMemGiB: shmMB, | |||||
| UseShareMemGiB: shmMB > 0, | |||||
| RequestAll: true, | |||||
| } | |||||
| specs, err := models.FindSpecs(opt) | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainOneSpec from remote error,%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if len(specs) == 1 { | |||||
| return specs[0], nil | |||||
| } | |||||
| if len(specs) == 0 { | |||||
| s, err := InitQueueAndSpec(opt, "云脑一", "处理历史云脑任务时自动添加") | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainOneSpec InitQueueAndSpec error.err=%v", err) | |||||
| return nil, nil | |||||
| } | |||||
| return s, nil | |||||
| } | |||||
| log.Error("Too many results matched.size=%d opt=%+v", len(specs), opt) | |||||
| return nil, nil | |||||
| } | |||||
| func findCloudbrainOneSpecFromConfig(task *models.Cloudbrain) (*models.Specification, error) { | |||||
| //find from config | |||||
| var specConfig *models.ResourceSpec | |||||
| hasSpec := false | |||||
| if task.JobType == string(models.JobTypeTrain) { | |||||
| if cloudbrain.TrainResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.TrainResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| specConfig = tmp | |||||
| break | |||||
| } | |||||
| } | |||||
| } else if task.JobType == string(models.JobTypeInference) { | |||||
| if cloudbrain.InferenceResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.InferenceResourceSpecs), &cloudbrain.InferenceResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.InferenceResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| specConfig = tmp | |||||
| break | |||||
| } | |||||
| } | |||||
| } else { | |||||
| if cloudbrain.ResourceSpecs == nil { | |||||
| json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs) | |||||
| } | |||||
| for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec { | |||||
| if tmp.Id == task.ResourceSpecId { | |||||
| hasSpec = true | |||||
| specConfig = tmp | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| if !hasSpec && cloudbrain.SpecialPools != nil { | |||||
| for _, specialPool := range cloudbrain.SpecialPools.Pools { | |||||
| if specialPool.ResourceSpec != nil { | |||||
| for _, spec := range specialPool.ResourceSpec { | |||||
| if task.ResourceSpecId == spec.Id { | |||||
| hasSpec = true | |||||
| specConfig = spec | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| if specConfig == nil { | |||||
| log.Error("getCloudbrainOneSpec from config failed,task.ResourceSpecId=%d", task.ResourceSpecId) | |||||
| return nil, nil | |||||
| } | |||||
| if task.ComputeResource == "CPU/GPU" { | |||||
| task.ComputeResource = models.GPU | |||||
| } | |||||
| shareMemMiB := float32(specConfig.ShareMemMiB) / 1024 | |||||
| if specConfig.ShareMemMiB == 103600 { | |||||
| shareMemMiB = 100 | |||||
| } else if specConfig.ShareMemMiB == 51800 { | |||||
| shareMemMiB = 50 | |||||
| } | |||||
| opt := models.FindSpecsOptions{ | |||||
| JobType: models.JobType(task.JobType), | |||||
| ComputeResource: task.ComputeResource, | |||||
| Cluster: models.OpenICluster, | |||||
| AiCenterCode: models.AICenterOfCloudBrainOne, | |||||
| QueueCode: task.GpuQueue, | |||||
| AccCardsNum: specConfig.GpuNum, | |||||
| UseAccCardsNum: true, | |||||
| CpuCores: specConfig.CpuNum, | |||||
| UseCpuCores: true, | |||||
| MemGiB: float32(specConfig.MemMiB) / 1024, | |||||
| UseMemGiB: true, | |||||
| ShareMemGiB: shareMemMiB, | |||||
| UseShareMemGiB: true, | |||||
| RequestAll: true, | |||||
| } | |||||
| specs, err := models.FindSpecs(opt) | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainOneSpec from config error,%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if len(specs) > 1 { | |||||
| log.Error("Too many results matched.size=%d opt=%+v", len(specs), opt) | |||||
| return nil, nil | |||||
| } | |||||
| if len(specs) == 0 { | |||||
| s, err := InitQueueAndSpec(opt, "云脑一", "处理历史云脑任务时自动添加") | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainOneSpec InitQueueAndSpec error.err=%v", err) | |||||
| return nil, nil | |||||
| } | |||||
| return s, nil | |||||
| } | |||||
| return specs[0], nil | |||||
| } | |||||
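| // getCloudbrainTwoSpec maps a ModelArts task to a spec by flavor code, falling back to the remote notebook or train-job API when the task row has no FlavorCode recorded. | |||||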
| func getCloudbrainTwoSpec(task *models.Cloudbrain) (*models.Specification, error) { | |||||
| specMap, err := models.GetCloudbrainTwoSpecs() | |||||
| if err != nil { | |||||
| log.Error("InitCloudbrainTwoSpecs err.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if task.FlavorCode != "" { | |||||
| return specMap[task.FlavorCode], nil | |||||
| } | |||||
| time.Sleep(200 * time.Millisecond) | |||||
| log.Info("start getCloudbrainTwoSpec FromRemote") | |||||
| if task.JobType == string(models.JobTypeDebug) { | |||||
| result, err := modelarts.GetNotebook2(task.JobID) | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainTwoSpec GetNotebook2 error.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if result != nil { | |||||
| return specMap[result.Flavor], nil | |||||
| } | |||||
| } else if task.JobType == string(models.JobTypeTrain) || task.JobType == string(models.JobTypeInference) { | |||||
| result, err := modelarts.GetTrainJob(task.JobID, strconv.FormatInt(task.VersionID, 10)) | |||||
| if err != nil { | |||||
| log.Error("getCloudbrainTwoSpec GetTrainJob error:%v", task.JobName, err) | |||||
| return nil, err | |||||
| } | |||||
| if result != nil { | |||||
| return specMap[result.Flavor.Code], nil | |||||
| } | |||||
| } | |||||
| return nil, nil | |||||
| } | |||||
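| // getGrampusSpec looks up a C2Net task's spec by flavor code, preferring the center-specific entry ("flavorCode_center") when the task records an AI center. | |||||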
| func getGrampusSpec(task *models.Cloudbrain) (*models.Specification, error) { | |||||
| specMap, err := models.GetGrampusSpecs() | |||||
| if err != nil { | |||||
| log.Error("GetGrampusSpecs err.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if task.AiCenter != "" { | |||||
| c := strings.Split(task.AiCenter, "+") | |||||
| spec := specMap[task.FlavorCode+"_"+c[0]] | |||||
| if spec != nil { | |||||
| return spec, nil | |||||
| } | |||||
| } | |||||
| return specMap[task.FlavorCode], nil | |||||
| } | |||||
| func InitQueueAndSpec(opt models.FindSpecsOptions, aiCenterName string, remark string) (*models.Specification, error) { | |||||
| return models.InitQueueAndSpec(models.ResourceQueue{ | |||||
| QueueCode: opt.QueueCode, | |||||
| Cluster: opt.Cluster, | |||||
| AiCenterCode: opt.AiCenterCode, | |||||
| AiCenterName: aiCenterName, | |||||
| ComputeResource: opt.ComputeResource, | |||||
| AccCardType: models.GetCloudbrainOneAccCardType(opt.QueueCode), | |||||
| Remark: remark, | |||||
| }, models.ResourceSpecification{ | |||||
| AccCardsNum: opt.AccCardsNum, | |||||
| CpuCores: opt.CpuCores, | |||||
| MemGiB: opt.MemGiB, | |||||
| GPUMemGiB: opt.GPUMemGiB, | |||||
| ShareMemGiB: opt.ShareMemGiB, | |||||
| Status: models.SpecOffShelf, | |||||
| IsAvailable: true, | |||||
| }) | |||||
| } | |||||
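For orientation, the ModelArts and Grampus handler changes earlier in this diff all use the helpers above in the same order: FindAvailableSpecs fills the create page, GetAndCheckSpec re-validates the submitted spec id, account.IsPointBalanceEnough gates on the spec's unit price, and the spec is then snapshotted onto the created task. A minimal sketch of that flow, illustrative only (createNPUTrainJob is a placeholder name; the calls are the ones added in this change):

    // Sketch, not part of the diff: the intended order of the new spec helpers.
    func createNPUTrainJob(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) {
        // 1. Re-validate the spec chosen on the create page for this job type and cluster.
        spec, err := resource.GetAndCheckSpec(ctx.User.ID, form.SpecId, models.FindSpecsOptions{
            JobType:         models.JobTypeTrain,
            ComputeResource: models.NPU,
            Cluster:         models.OpenICluster,
            AiCenterCode:    models.AICenterOfCloudBrainTwo,
        })
        if err != nil || spec == nil {
            // re-render the form with an error, as the real handlers do
            return
        }
        // 2. Gate the request on the user's point balance against the spec's unit price.
        if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
            return
        }
        // 3. Create the job as before, passing Spec in the request (req.Spec = spec)
        //    so it is snapshotted and later read back via resource.GetCloudbrainSpec.
    }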
| @@ -46,7 +46,7 @@ func SendVerifyCode(conn redis.Conn, phoneNumber string) error { | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| err = redis_client.Expire(conn, timesKey, getRemainSecondOfDay(time.Now())) | |||||
| err = redis_client.EXPIRE(conn, timesKey, getRemainSecondOfDay(time.Now())) | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| @@ -0,0 +1,50 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/util" | |||||
| ) | |||||
| func AdminBalanceOperate(req models.AdminRewardOperateReq, doer *models.User) error { | |||||
| logId := util.UUID() | |||||
| _, err := models.InsertRewardAdminLog(&models.RewardAdminLog{ | |||||
| LogId: logId, | |||||
| Amount: req.Amount, | |||||
| RewardType: req.RewardType.Name(), | |||||
| TargetUserId: req.TargetUserId, | |||||
| CreatorId: doer.ID, | |||||
| CreatorName: doer.Name, | |||||
| Remark: req.Remark, | |||||
| Status: models.RewardAdminLogProcessing, | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("AdminBalanceOperate InsertRewardAdminLog error.%v", err) | |||||
| return err | |||||
| } | |||||
| //reward | |||||
| err = Operate(&models.RewardOperateContext{ | |||||
| SourceType: models.SourceTypeAdminOperate, | |||||
| SourceId: logId, | |||||
| Title: "管理员操作", | |||||
| Reward: models.Reward{ | |||||
| Amount: req.Amount, | |||||
| Type: req.RewardType, | |||||
| }, | |||||
| TargetUserId: req.TargetUserId, | |||||
| RequestId: logId, | |||||
| OperateType: req.OperateType, | |||||
| Remark: req.Remark, | |||||
| RejectPolicy: models.JustReject, | |||||
| PermittedNegative: true, | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("AdminBalanceOperate operate error.%v", err) | |||||
| models.UpdateRewardAdminLogStatus(logId, models.RewardAdminLogProcessing, models.RewardAdminLogFailed) | |||||
| return err | |||||
| } | |||||
| models.UpdateRewardAdminLogStatus(logId, models.RewardAdminLogProcessing, models.RewardAdminLogSuccess) | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,133 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "fmt" | |||||
| "time" | |||||
| ) | |||||
| var ( | |||||
| ResourceSpecs *models.ResourceSpecs | |||||
| TrainResourceSpecs *models.ResourceSpecs | |||||
| ) | |||||
| const RUN_CLOUDBRAIN_TASK_TITTLE = "运行云脑任务" | |||||
| func AcceptStatusChangeAction() { | |||||
| for { | |||||
| select { | |||||
| case task := <-models.StatusChangeChan: | |||||
| DeductPoint4Cloudbrain(*task, time.Now()) | |||||
| } | |||||
| } | |||||
| } | |||||
| func StartAndGetCloudBrainPointDeductTask(task models.Cloudbrain) (*models.RewardPeriodicTask, error) { | |||||
| if !setting.CloudBrainPaySwitch { | |||||
| return nil, nil | |||||
| } | |||||
| unitPrice, err := models.GetCloudbrainTaskUnitPrice(task.ID) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if unitPrice == 0 { | |||||
| log.Debug("finish StartAndGetCloudBrainPointDeductTask, UnitPrice = 0 task.ID=%d", task.ID) | |||||
| return nil, nil | |||||
| } | |||||
| return StartAndGetPeriodicTask(&models.StartPeriodicTaskOpts{ | |||||
| SourceType: models.SourceTypeRunCloudbrainTask, | |||||
| SourceId: getCloudBrainPointTaskSourceId(task), | |||||
| TargetUserId: task.UserID, | |||||
| RequestId: getCloudBrainPointTaskSourceId(task), | |||||
| OperateType: models.OperateTypeDecrease, | |||||
| Delay: setting.CloudBrainPayDelay, | |||||
| Interval: setting.CloudBrainPayInterval, | |||||
| UnitAmount: unitPrice, | |||||
| RewardType: models.RewardTypePoint, | |||||
| StartTime: time.Unix(int64(task.StartTime), 0), | |||||
| Title: RUN_CLOUDBRAIN_TASK_TITTLE, | |||||
| }) | |||||
| } | |||||
| func StopCloudBrainPointDeductTask(task models.Cloudbrain) { | |||||
| StopPeriodicTask(models.SourceTypeRunCloudbrainTask, getCloudBrainPointTaskSourceId(task), models.OperateTypeDecrease) | |||||
| } | |||||
| func getCloudBrainPointTaskSourceId(task models.Cloudbrain) string { | |||||
| return fmt.Sprint(task.ID) | |||||
| } | |||||
| var firstTimeFlag = true | |||||
| func StartCloudbrainPointDeductTask() { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| log.Debug("try to run CloudbrainPointDeductTask") | |||||
| end := time.Now() | |||||
| start := end.Add(-1 * setting.DeductTaskRange) | |||||
| if firstTimeFlag { | |||||
| //When it is executed for the first time, it needs to process the tasks of the last 3 hours. | |||||
| //This is done to prevent the application from hanging for a long time | |||||
| start = end.Add(-1 * setting.DeductTaskRangeForFirst) | |||||
| firstTimeFlag = false | |||||
| } | |||||
| taskList, err := models.GetStartedCloudbrainTaskByUpdatedUnix(start, end) | |||||
| if err != nil { | |||||
| log.Error("GetStartedCloudbrainTaskByUpdatedUnix error. %v", err) | |||||
| return | |||||
| } | |||||
| if len(taskList) == 0 { | |||||
| log.Debug("No cloudbrain task need handled") | |||||
| return | |||||
| } | |||||
| for _, t := range taskList { | |||||
| DeductPoint4Cloudbrain(t, end) | |||||
| } | |||||
| log.Debug("CloudbrainPointDeductTask completed") | |||||
| } | |||||
| func DeductPoint4Cloudbrain(t models.Cloudbrain, now time.Time) error { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| log.Debug("start to deduct point for cloudbrain[%d]", t.ID) | |||||
| if t.StartTime == 0 { | |||||
| log.Debug("cloudbrain[%d] task not start", t.ID) | |||||
| return nil | |||||
| } | |||||
| task, err := StartAndGetCloudBrainPointDeductTask(t) | |||||
| if err != nil { | |||||
| log.Error("run cloudbrain point deduct task error,err=%v", err) | |||||
| return err | |||||
| } | |||||
| if task == nil { | |||||
| log.Debug("cloudbrain[%d] deduct task is nil") | |||||
| return nil | |||||
| } | |||||
| if task.Status == models.PeriodicTaskStatusFinished { | |||||
| log.Info("Periodic task is finished") | |||||
| return nil | |||||
| } | |||||
| if t.EndTime > 0 { | |||||
| endTime := time.Unix(int64(t.EndTime), 0) | |||||
| RunRewardTask(*task, endTime) | |||||
| models.StopPeriodicTask(task.ID, task.OperateSerialNo, endTime) | |||||
| } else { | |||||
| RunRewardTask(*task, now) | |||||
| } | |||||
| log.Debug("finished deduct point for cloudbrain[%d]", t.ID) | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,100 @@ | |||||
| package limiter | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| ) | |||||
| func GetSingleDailyPointLimitConfig() (*models.LimitConfigVO, error) { | |||||
| r, err := GetLimitConfigList(models.LimitConfigQueryOpts{ | |||||
| RefreshRate: models.PeriodDaily, | |||||
| Scope: models.LimitScopeSingleUser, | |||||
| LimitCode: models.SourceTypeAccomplishTask.Name(), | |||||
| LimitType: models.LimitTypeRewardPoint, | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if len(r) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| return r[0], nil | |||||
| } | |||||
| func SetSingleDailyPointLimitConfig(limitNum int64, doer *models.User) error { | |||||
| l := &models.LimitConfigVO{ | |||||
| RefreshRate: models.PeriodDaily, | |||||
| Scope: models.LimitScopeSingleUser.Name(), | |||||
| LimitCode: models.SourceTypeAccomplishTask.Name(), | |||||
| LimitType: models.LimitTypeRewardPoint.Name(), | |||||
| LimitNum: limitNum, | |||||
| } | |||||
| return AddLimitConfig(l, doer) | |||||
| } | |||||
| func GetLimitConfigList(opts models.LimitConfigQueryOpts) ([]*models.LimitConfigVO, error) { | |||||
| r, err := GetLimitersByLimitType(opts.LimitType) | |||||
| if err != nil { | |||||
| log.Error("GetLimitConfigList error when getting limiters by limit type.err=%v", err) | |||||
| return nil, err | |||||
| } | |||||
| result := make([]*models.LimitConfigVO, 0) | |||||
| for _, v := range r { | |||||
| if opts.LimitCode != "" && opts.LimitCode != v.LimitCode { | |||||
| continue | |||||
| } | |||||
| if opts.Scope != "" && opts.Scope.Name() != v.Scope { | |||||
| continue | |||||
| } | |||||
| if opts.RefreshRate != "" && opts.RefreshRate != v.RefreshRate { | |||||
| continue | |||||
| } | |||||
| if opts.LimitType != "" && opts.LimitType.Name() != v.LimitType { | |||||
| continue | |||||
| } | |||||
| result = append(result, v.ToLimitConfigVO()) | |||||
| } | |||||
| return result, nil | |||||
| } | |||||
| func GetLimitConfigById(id int64) (*models.LimitConfig, error) { | |||||
| return models.GetLimitConfigById(id) | |||||
| } | |||||
| func AddLimitConfig(config *models.LimitConfigVO, doer *models.User) error { | |||||
| r := &models.LimitConfig{ | |||||
| Title: config.Title, | |||||
| RefreshRate: config.RefreshRate, | |||||
| Scope: config.Scope, | |||||
| LimitNum: config.LimitNum, | |||||
| LimitCode: config.LimitCode, | |||||
| LimitType: config.LimitType, | |||||
| CreatorId: doer.ID, | |||||
| CreatorName: doer.Name, | |||||
| } | |||||
| err := models.AddLimitConfig(r) | |||||
| if err != nil { | |||||
| log.Error("add limit config error,config:%v err:%v", config, err) | |||||
| return err | |||||
| } | |||||
| redis_client.Del(redis_key.LimitConfig(config.LimitType)) | |||||
| return nil | |||||
| } | |||||
| func DeleteLimitConfig(id int64, doer *models.User) error { | |||||
| config, err := GetLimitConfigById(id) | |||||
| if err != nil { | |||||
| log.Error("GetLimitConfigById err,e=%v", err) | |||||
| return err | |||||
| } | |||||
| err = models.DeleteLimitConfig(*config, doer.ID, doer.Name) | |||||
| if err != nil { | |||||
| log.Error("add limit config error,config:%v err:%v", config, err) | |||||
| return err | |||||
| } | |||||
| redis_client.Del(redis_key.LimitConfig(config.LimitType)) | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,258 @@ | |||||
| package limiter | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/services/task/period" | |||||
| "encoding/json" | |||||
| "errors" | |||||
| "fmt" | |||||
| "time" | |||||
| ) | |||||
| type limiterRunner struct { | |||||
| limiters []models.LimitConfig | |||||
| index int | |||||
| userId int64 | |||||
| amount int64 | |||||
| limitCode string | |||||
| limitType models.LimitType | |||||
| rejectPolicy models.LimiterRejectPolicy | |||||
| resultMap map[int]limitResult | |||||
| minRealAmount int64 | |||||
| } | |||||
| type limitResult struct { | |||||
| isLoss bool | |||||
| planAmount int64 | |||||
| realAmount int64 | |||||
| } | |||||
| func newLimitResult(isLoss bool, planAmount int64, realAmount int64) limitResult { | |||||
| return limitResult{ | |||||
| isLoss: isLoss, | |||||
| planAmount: planAmount, | |||||
| realAmount: realAmount, | |||||
| } | |||||
| } | |||||
| func newLimiterRunner(limitCode string, limitType models.LimitType, userId, amount int64, policy models.LimiterRejectPolicy) *limiterRunner { | |||||
| return &limiterRunner{ | |||||
| userId: userId, | |||||
| amount: amount, | |||||
| limitCode: limitCode, | |||||
| limitType: limitType, | |||||
| index: 0, | |||||
| rejectPolicy: policy, | |||||
| resultMap: make(map[int]limitResult, 0), | |||||
| } | |||||
| } | |||||
| //Run runs all the matched limiters in order. | |||||
| //It tracks the real used amount: with the FillUp reject policy the requested amount may only be partially used. | |||||
| func (l *limiterRunner) Run() error { | |||||
| if err := l.LoadLimiters(); err != nil { | |||||
| return err | |||||
| } | |||||
| l.minRealAmount = l.amount | |||||
| for l.index < len(l.limiters) { | |||||
| err := l.limit(l.limiters[l.index]) | |||||
| if err != nil { | |||||
| log.Info("limiter check failed,%v", err) | |||||
| l.Rollback() | |||||
| return err | |||||
| } | |||||
| result := l.resultMap[l.index] | |||||
| if result.isLoss { | |||||
| //find the minimum real amount | |||||
| if l.minRealAmount > result.realAmount { | |||||
| l.minRealAmount = result.realAmount | |||||
| } | |||||
| } | |||||
| l.index += 1 | |||||
| } | |||||
| //post process | |||||
| l.PostProcess() | |||||
| return nil | |||||
| } | |||||
| //Rollback rolls back the used amount of limiters[0] to limiters[index-1] | |||||
| func (l *limiterRunner) Rollback() error { | |||||
| for i := l.index - 1; i >= 0; i-- { | |||||
| l.rollback(l.limiters[i], l.resultMap[i]) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (l *limiterRunner) rollback(r models.LimitConfig, result limitResult) error { | |||||
| p, err := period.GetPeriod(r.RefreshRate) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| redisKey := redis_key.LimitCount(l.userId, r.LimitCode, r.LimitType, r.Scope, p) | |||||
| redis_client.IncrBy(redisKey, -1*result.realAmount) | |||||
| return nil | |||||
| } | |||||
| //PostProcess handles the loss: limiters whose realAmount exceeds the final minimum get the difference deducted | |||||
| func (l *limiterRunner) PostProcess() error { | |||||
| for i := l.index - 1; i >= 0; i-- { | |||||
| l.postProcess(l.limiters[i], l.resultMap[i]) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (l *limiterRunner) postProcess(r models.LimitConfig, result limitResult) error { | |||||
| if result.realAmount == l.minRealAmount { | |||||
| return nil | |||||
| } | |||||
| p, err := period.GetPeriod(r.RefreshRate) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| diff := result.realAmount - l.minRealAmount | |||||
| redisKey := redis_key.LimitCount(l.userId, r.LimitCode, r.LimitType, r.Scope, p) | |||||
| redis_client.IncrBy(redisKey, -1*diff) | |||||
| return nil | |||||
| } | |||||
| func (l *limiterRunner) limit(r models.LimitConfig) error { | |||||
| p, err := period.GetPeriod(r.RefreshRate) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| redisKey := redis_key.LimitCount(l.userId, r.LimitCode, r.LimitType, r.Scope, p) | |||||
| usedNum, err := redis_client.IncrBy(redisKey, l.amount) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| //if usedNum equals amount, this is either the first operation in the period or the redis cache was deleted; | |||||
| //count in the database to distinguish the two cases | |||||
| if usedNum == l.amount { | |||||
| n, err := l.countInPeriod(r, p) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if n > 0 { | |||||
| //means redis cache deleted,incr the cache with real value | |||||
| usedNum, err = redis_client.IncrBy(redisKey, n) | |||||
| } | |||||
| if p != nil { | |||||
| redis_client.Expire(redisKey, p.LeftTime) | |||||
| } else { | |||||
| //add default expire time if no period set | |||||
| redis_client.Expire(redisKey, 24*time.Hour) | |||||
| } | |||||
| } | |||||
| if usedNum > r.LimitNum { | |||||
| if usedNum-r.LimitNum >= l.amount { | |||||
| redis_client.IncrBy(redisKey, -1*l.amount) | |||||
| return errors.New(fmt.Sprintf("over limit,congfigId=%d", r.ID)) | |||||
| } | |||||
| switch l.rejectPolicy { | |||||
| case models.FillUp: | |||||
| exceed := usedNum - r.LimitNum | |||||
| realAmount := l.amount - exceed | |||||
| redis_client.IncrBy(redisKey, -1*exceed) | |||||
| l.resultMap[l.index] = newLimitResult(true, l.amount, realAmount) | |||||
| return nil | |||||
| case models.JustReject: | |||||
| redis_client.IncrBy(redisKey, -1*l.amount) | |||||
| return errors.New(fmt.Sprintf("over limit,congfigId=%d", r.ID)) | |||||
| case models.PermittedOnce: | |||||
| l.resultMap[l.index] = newLimitResult(false, l.amount, l.amount) | |||||
| return nil | |||||
| } | |||||
| } | |||||
| l.resultMap[l.index] = newLimitResult(false, l.amount, l.amount) | |||||
| return nil | |||||
| } | |||||
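| // For illustration, a hypothetical walk through the FillUp branch above: | |||||
| // suppose r.LimitNum=100, 95 points were already counted in this period, and l.amount=10. | |||||
| // IncrBy then returns usedNum=105, so exceed=105-100=5 and realAmount=10-5=5; | |||||
| // the counter is decremented by 5 and the result records a loss of 5 points, | |||||
| // which Run's minRealAmount tracking and PostProcess later reconcile. | |||||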
| func (l *limiterRunner) LoadLimiters() error { | |||||
| limiters, err := GetLimiters(l.limitCode, l.limitType) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if limiters != nil { | |||||
| l.limiters = limiters | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (l *limiterRunner) countInPeriod(r models.LimitConfig, p *models.PeriodResult) (int64, error) { | |||||
| switch r.LimitType { | |||||
| case models.LimitTypeTask.Name(): | |||||
| return models.CountTaskAccomplishLogInTaskPeriod(r.LimitCode, l.userId, p) | |||||
| case models.LimitTypeRewardPoint.Name(): | |||||
| return models.SumRewardAmountInTaskPeriod(models.RewardTypePoint.Name(), r.LimitCode, l.userId, p) | |||||
| default: | |||||
| return 0, nil | |||||
| } | |||||
| } | |||||
| func CheckLimit(limitCode string, limitType models.LimitType, userId, amount int64, rejectPolicy models.LimiterRejectPolicy) (int64, error) { | |||||
| if rejectPolicy == "" { | |||||
| rejectPolicy = models.JustReject | |||||
| } | |||||
| r := newLimiterRunner(limitCode, limitType, userId, amount, rejectPolicy) | |||||
| err := r.Run() | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return r.minRealAmount, nil | |||||
| } | |||||
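| // For reference, the point operator in this change calls this as | |||||
| //   realAmount, err := limiter.CheckLimit(ctx.SourceType.Name(), models.LimitTypeRewardPoint, | |||||
| //       ctx.TargetUserId, ctx.Reward.Amount, ctx.RejectPolicy) | |||||
| // with the FillUp policy when rewarding accomplished tasks, while the task package checks | |||||
| // the task-count limit itself with amount=1 and the JustReject policy. | |||||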
| func GetLimiters(limitCode string, limitType models.LimitType) ([]models.LimitConfig, error) { | |||||
| limiters, err := GetLimitersByLimitType(limitType) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| result := make([]models.LimitConfig, 0) | |||||
| for i, v := range limiters { | |||||
| if v.LimitCode == "" || v.LimitCode == limitCode { | |||||
| result = append(result, limiters[i]) | |||||
| } | |||||
| } | |||||
| return result, nil | |||||
| } | |||||
| func GetLimitersByLimitType(limitType models.LimitType) ([]models.LimitConfig, error) { | |||||
| redisKey := redis_key.LimitConfig(limitType.Name()) | |||||
| val, _ := redis_client.Get(redisKey) | |||||
| if val != "" { | |||||
| if val == redis_key.EMPTY_REDIS_VAL { | |||||
| return nil, nil | |||||
| } | |||||
| limiters := make([]models.LimitConfig, 0) | |||||
| json.Unmarshal([]byte(val), &limiters) | |||||
| return limiters, nil | |||||
| } | |||||
| limiters, err := models.GetLimitConfigByLimitType(limitType) | |||||
| if err != nil { | |||||
| if models.IsErrRecordNotExist(err) { | |||||
| redis_client.Setex(redisKey, redis_key.EMPTY_REDIS_VAL, 5*time.Second) | |||||
| return nil, nil | |||||
| } | |||||
| return nil, err | |||||
| } | |||||
| jsonStr, _ := json.Marshal(limiters) | |||||
| redis_client.Setex(redisKey, string(jsonStr), 30*24*time.Hour) | |||||
| return limiters, nil | |||||
| } | |||||
| func GetLimitersByRelatedIdWithDeleted(limitType models.LimitType) ([]models.LimitConfig, error) { | |||||
| limiters, err := models.GetLimitersByRelatedIdWithDeleted(limitType) | |||||
| if err != nil { | |||||
| if models.IsErrRecordNotExist(err) { | |||||
| return nil, nil | |||||
| } | |||||
| return nil, err | |||||
| } | |||||
| return limiters, nil | |||||
| } | |||||
| @@ -0,0 +1,54 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "encoding/json" | |||||
| "fmt" | |||||
| "time" | |||||
| ) | |||||
| func NotifyRewardOperation(userId, amount int64, sourceType models.SourceType, rewardType models.RewardType, operateType models.RewardOperateType) { | |||||
| switch sourceType { | |||||
| case models.SourceTypeRunCloudbrainTask: | |||||
| return | |||||
| } | |||||
| data := &models.UserRewardOperationRedis{ | |||||
| UserId: userId, | |||||
| Amount: amount, | |||||
| RewardType: rewardType, | |||||
| OperateType: operateType, | |||||
| } | |||||
| b, _ := json.Marshal(data) | |||||
| redis_client.ZAdd(redis_key.RewardOperateNotification(), string(b), float64(time.Now().Unix())) | |||||
| } | |||||
| func GetRewardOperation(since, until timeutil.TimeStamp) []models.UserRewardOperation { | |||||
| list, err := redis_client.ZRangeByScore(redis_key.RewardOperateNotification(), float64(since), float64(until)) | |||||
| if err != nil { | |||||
| log.Error("GetRewardOperation ZRangeByScore error. %v", err) | |||||
| return nil | |||||
| } | |||||
| if len(list) == 0 { | |||||
| log.Debug("GetRewardOperation list length = 0") | |||||
| return nil | |||||
| } | |||||
| r := make([]models.UserRewardOperation, 0, len(list)) | |||||
| for _, v := range list { | |||||
| t := models.UserRewardOperationRedis{} | |||||
| json.Unmarshal([]byte(v), &t) | |||||
| r = append(r, models.UserRewardOperation{ | |||||
| UserId: t.UserId, | |||||
| Msg: v, | |||||
| }) | |||||
| } | |||||
| redis_client.ZRemRangeByScore(redis_key.RewardOperateNotification(), float64(since), float64(until)) | |||||
| return r | |||||
| } | |||||
| func GetRewardOperateMsg(u models.UserRewardOperationRedis) string { | |||||
| return u.OperateType.Show() + fmt.Sprint(u.Amount) + u.RewardType.Show() | |||||
| } | |||||
| @@ -0,0 +1,280 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/modules/redis/redis_lock" | |||||
| "code.gitea.io/gitea/services/reward/point" | |||||
| "errors" | |||||
| "fmt" | |||||
| "time" | |||||
| ) | |||||
| var RewardOperatorMap = map[string]RewardOperator{ | |||||
| fmt.Sprint(models.RewardTypePoint): new(point.PointOperator), | |||||
| } | |||||
| type RewardOperator interface { | |||||
| IsLimited(ctx *models.RewardOperateContext) error | |||||
| Operate(ctx *models.RewardOperateContext) error | |||||
| } | |||||
| func Operate(ctx *models.RewardOperateContext) error { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| if !checkRewardOperationParam(ctx) { | |||||
| log.Error("send reward error,param incorrect") | |||||
| return errors.New("param incorrect") | |||||
| } | |||||
| //add lock | |||||
| var rewardLock = redis_lock.NewDistributeLock(redis_key.RewardOperateLock(ctx.RequestId, ctx.SourceType.Name(), ctx.OperateType.Name())) | |||||
| isOk, err := rewardLock.Lock(3 * time.Second) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if !isOk { | |||||
| log.Info("duplicated reward request,targetUserId=%d requestId=%s", ctx.TargetUserId, ctx.RequestId) | |||||
| return nil | |||||
| } | |||||
| defer rewardLock.UnLock() | |||||
| //is handled before? | |||||
| isHandled, err := isHandled(ctx.SourceType.Name(), ctx.RequestId, ctx.OperateType.Name()) | |||||
| if err != nil { | |||||
| log.Error("reward is handled error,%v", err) | |||||
| return err | |||||
| } | |||||
| if isHandled { | |||||
| log.Info("reward has been handled,ctx=%+v", ctx) | |||||
| return nil | |||||
| } | |||||
| //get operator | |||||
| operator := GetOperator(ctx.Reward.Type) | |||||
| if operator == nil { | |||||
| log.Error("operator of reward type is not exist,ctx=%v", ctx) | |||||
| return errors.New("operator of reward type is not exist") | |||||
| } | |||||
| if ctx.OperateType == models.OperateTypeIncrease { | |||||
| //is limited? | |||||
| if err := operator.IsLimited(ctx); err != nil { | |||||
| log.Info("operator IsLimited, err=%v", err) | |||||
| return err | |||||
| } | |||||
| } | |||||
| //new reward operate record | |||||
| recordId, err := initRewardOperateRecord(ctx) | |||||
| if err != nil { | |||||
| log.Error("initRewardOperateRecord error,err=%v", err) | |||||
| return err | |||||
| } | |||||
| ctx.SourceId = recordId | |||||
| //operate | |||||
| if err := operator.Operate(ctx); err != nil { | |||||
| log.Error("operator Operate error,err=%v", err) | |||||
| UpdateRewardRecordToFinalStatus(ctx.SourceType.Name(), ctx.RequestId, models.OperateStatusFailed) | |||||
| return err | |||||
| } | |||||
| UpdateRewardRecordToFinalStatus(ctx.SourceType.Name(), ctx.RequestId, models.OperateStatusSucceeded) | |||||
| NotifyRewardOperation(ctx.TargetUserId, ctx.Reward.Amount, ctx.SourceType, ctx.Reward.Type, ctx.OperateType) | |||||
| return nil | |||||
| } | |||||
| func checkRewardOperationParam(ctx *models.RewardOperateContext) bool { | |||||
| if ctx.Reward.Type == "" { | |||||
| return false | |||||
| } | |||||
| return true | |||||
| } | |||||
| func GetOperator(rewardType models.RewardType) RewardOperator { | |||||
| return RewardOperatorMap[rewardType.Name()] | |||||
| } | |||||
| func isHandled(sourceType string, requestId string, operateType string) (bool, error) { | |||||
| _, err := models.GetPointOperateRecordBySourceTypeAndRequestId(sourceType, requestId, operateType) | |||||
| if err != nil { | |||||
| log.Error("operator isHandled error. %v", err) | |||||
| if models.IsErrRecordNotExist(err) { | |||||
| return false, nil | |||||
| } | |||||
| log.Error("GetPointOperateRecordBySourceTypeAndRequestId ZRangeByScore error. %v", err) | |||||
| return false, err | |||||
| } | |||||
| return true, nil | |||||
| } | |||||
| func initRewardOperateRecord(ctx *models.RewardOperateContext) (string, error) { | |||||
| sn, err := generateOperateSerialNo() | |||||
| if err != nil { | |||||
| log.Error("generateOperateSerialNo error. %v", err) | |||||
| return "", err | |||||
| } | |||||
| record := &models.RewardOperateRecord{ | |||||
| UserId: ctx.TargetUserId, | |||||
| Amount: ctx.Reward.Amount, | |||||
| LossAmount: ctx.LossAmount, | |||||
| RewardType: ctx.Reward.Type.Name(), | |||||
| SourceType: ctx.SourceType.Name(), | |||||
| SourceId: ctx.SourceId, | |||||
| SourceTemplateId: ctx.SourceTemplateId, | |||||
| RequestId: ctx.RequestId, | |||||
| OperateType: ctx.OperateType.Name(), | |||||
| Status: models.OperateStatusOperating, | |||||
| Remark: ctx.Remark, | |||||
| Title: ctx.Title, | |||||
| SerialNo: sn, | |||||
| } | |||||
| _, err = models.InsertRewardOperateRecord(record) | |||||
| if err != nil { | |||||
| log.Error("InsertRewardOperateRecord error. %v", err) | |||||
| return "", err | |||||
| } | |||||
| return record.SerialNo, nil | |||||
| } | |||||
| func createPeriodicRewardOperateRecord(ctx *models.StartPeriodicTaskOpts) (string, error) { | |||||
| sn, err := generateOperateSerialNo() | |||||
| if err != nil { | |||||
| log.Error("createPeriodic generateOperateSerialNo error. %v", err) | |||||
| return "", err | |||||
| } | |||||
| record := &models.RewardOperateRecord{ | |||||
| UserId: ctx.TargetUserId, | |||||
| Amount: 0, | |||||
| RewardType: ctx.RewardType.Name(), | |||||
| SourceType: ctx.SourceType.Name(), | |||||
| SourceId: ctx.SourceId, | |||||
| RequestId: ctx.RequestId, | |||||
| OperateType: ctx.OperateType.Name(), | |||||
| Status: models.OperateStatusOperating, | |||||
| Remark: ctx.Remark, | |||||
| Title: ctx.Title, | |||||
| SerialNo: sn, | |||||
| } | |||||
| _, err = models.InsertRewardOperateRecord(record) | |||||
| if err != nil { | |||||
| log.Error("createPeriodic InsertRewardOperateRecord error. %v", err) | |||||
| return "", err | |||||
| } | |||||
| return record.SerialNo, nil | |||||
| } | |||||
| func UpdateRewardRecordToFinalStatus(sourceType, requestId, newStatus string) error { | |||||
| _, err := models.UpdateRewardRecordToFinalStatus(sourceType, requestId, newStatus) | |||||
| if err != nil { | |||||
| log.Error("UpdateRewardRecord UpdateRewardRecordToFinalStatus error. %v", err) | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func StartPeriodicTaskAsyn(opts *models.StartPeriodicTaskOpts) { | |||||
| go StartAndGetPeriodicTask(opts) | |||||
| } | |||||
| func StartAndGetPeriodicTask(opts *models.StartPeriodicTaskOpts) (*models.RewardPeriodicTask, error) { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| //add lock | |||||
| var rewardLock = redis_lock.NewDistributeLock(redis_key.RewardOperateLock(opts.RequestId, opts.SourceType.Name(), opts.OperateType.Name())) | |||||
| isOk, err := rewardLock.Lock(3 * time.Second) | |||||
| if err != nil { | |||||
| log.Error("StartAndGetPeriodicTask RewardOperateLock error. %v", err) | |||||
| return nil, err | |||||
| } | |||||
| if !isOk { | |||||
| log.Info("duplicated operate request,targetUserId=%d requestId=%s", opts.TargetUserId, opts.RequestId) | |||||
| return nil, nil | |||||
| } | |||||
| defer rewardLock.UnLock() | |||||
| _, err = models.GetPointOperateRecordBySourceTypeAndRequestId(opts.SourceType.Name(), opts.RequestId, opts.OperateType.Name()) | |||||
| if err == nil { | |||||
| task, err := models.GetPeriodicTaskBySourceIdAndType(opts.SourceType, opts.SourceId, opts.OperateType) | |||||
| if err != nil { | |||||
| log.Error("GetPeriodicTaskBySourceIdAndType error,%v", err) | |||||
| return nil, err | |||||
| } | |||||
| return task, nil | |||||
| } | |||||
| if err != nil && !models.IsErrRecordNotExist(err) { | |||||
| log.Error("operate is handled error,%v", err) | |||||
| return nil, err | |||||
| } | |||||
| //new reward operate record | |||||
| recordId, err := createPeriodicRewardOperateRecord(opts) | |||||
| if err != nil { | |||||
| log.Error("StartAndGetPeriodicTask createPeriodicRewardOperateRecord error. %v", err) | |||||
| return nil, err | |||||
| } | |||||
| if err = NewRewardPeriodicTask(recordId, opts); err != nil { | |||||
| log.Error("StartAndGetPeriodicTask NewRewardPeriodicTask error. %v", err) | |||||
| UpdateRewardRecordToFinalStatus(opts.SourceType.Name(), opts.RequestId, models.OperateStatusFailed) | |||||
| return nil, err | |||||
| } | |||||
| task, err := models.GetPeriodicTaskBySourceIdAndType(opts.SourceType, opts.SourceId, opts.OperateType) | |||||
| if err != nil { | |||||
| log.Error("GetPeriodicTaskBySourceIdAndType error,%v", err) | |||||
| return nil, err | |||||
| } | |||||
| return task, nil | |||||
| } | |||||
| func StopPeriodicTaskAsyn(sourceType models.SourceType, sourceId string, operateType models.RewardOperateType) { | |||||
| go StopPeriodicTask(sourceType, sourceId, operateType) | |||||
| } | |||||
| func StopPeriodicTask(sourceType models.SourceType, sourceId string, operateType models.RewardOperateType) error { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| task, err := models.GetPeriodicTaskBySourceIdAndType(sourceType, sourceId, operateType) | |||||
| if err != nil { | |||||
| log.Error("StopPeriodicTask. GetPeriodicTaskBySourceIdAndType error. %v", err) | |||||
| return err | |||||
| } | |||||
| if task == nil { | |||||
| log.Info("Periodic task is not exist") | |||||
| return nil | |||||
| } | |||||
| if task.Status == models.PeriodicTaskStatusFinished { | |||||
| log.Info("Periodic task is finished") | |||||
| return nil | |||||
| } | |||||
| now := time.Now() | |||||
| RunRewardTask(*task, now) | |||||
| return models.StopPeriodicTask(task.ID, task.OperateSerialNo, now) | |||||
| } | |||||
| func generateOperateSerialNo() (string, error) { | |||||
| s, err := GetSerialNoByRedis() | |||||
| if err != nil { | |||||
| log.Error("generateOperateSerialNo error. %v", err) | |||||
| return "", err | |||||
| } | |||||
| return s, nil | |||||
| } | |||||
| @@ -0,0 +1,131 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/modules/redis/redis_lock" | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| "code.gitea.io/gitea/routers/repo" | |||||
| "errors" | |||||
| "fmt" | |||||
| "time" | |||||
| ) | |||||
| func NewRewardPeriodicTask(operateRecordId string, opts *models.StartPeriodicTaskOpts) error { | |||||
| task := &models.RewardPeriodicTask{} | |||||
| task.DelaySeconds = int64(opts.Delay.Seconds()) | |||||
| task.IntervalSeconds = int64(opts.Interval.Seconds()) | |||||
| task.Amount = int64(opts.UnitAmount) | |||||
| task.OperateSerialNo = operateRecordId | |||||
| task.Status = models.PeriodicTaskStatusRunning | |||||
| task.NextExecuteTime = timeutil.TimeStamp(opts.StartTime.Add(opts.Delay).Unix()) | |||||
| _, err := models.InsertPeriodicTask(task) | |||||
| return err | |||||
| } | |||||
| func StartRewardTask() { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| log.Debug("try to run reward tasks") | |||||
| now := time.Now() | |||||
| taskList, err := models.GetRunningRewardTask(now) | |||||
| if err != nil { | |||||
| log.Error("GetRunningRewardTask error. %v", err) | |||||
| return | |||||
| } | |||||
| if len(taskList) == 0 { | |||||
| log.Debug("No GetRunningRewardTask need handled") | |||||
| return | |||||
| } | |||||
| for _, t := range taskList { | |||||
| RunRewardTask(t, now) | |||||
| } | |||||
| } | |||||
| func RunRewardTask(t models.RewardPeriodicTask, now time.Time) error { | |||||
| lock := redis_lock.NewDistributeLock(redis_key.RewardTaskRunningLock(t.ID)) | |||||
| isOk, _ := lock.LockWithWait(5*time.Second, 5*time.Second) | |||||
| if !isOk { | |||||
| log.Error("get RewardTaskRunningLock failed,t=%+v", t) | |||||
| return errors.New("get RewardTaskRunningLock failed") | |||||
| } | |||||
| defer lock.UnLock() | |||||
| record, err := models.GetPointOperateRecordBySerialNo(t.OperateSerialNo) | |||||
| if err != nil { | |||||
| log.Error("RunRewardTask. GetPointOperateRecordBySerialNo error. %v", err) | |||||
| return errors.New("GetPointOperateRecordBySerialNo error") | |||||
| } | |||||
| if record.Status != models.OperateStatusOperating { | |||||
| log.Info("RunRewardTask. operate record is finished,record=%+v", record) | |||||
| return nil | |||||
| } | |||||
| n, _ := countExecuteTimes(t, now) | |||||
| if n == 0 { | |||||
| log.Info("countExecuteTimes result is 0") | |||||
| return nil | |||||
| } | |||||
| //get operator | |||||
| operator := GetOperator(models.GetRewardTypeInstance(record.RewardType)) | |||||
| if operator == nil { | |||||
| log.Error("RunRewardTask. operator of reward type is not exist") | |||||
| return errors.New("operator of reward type is not exist") | |||||
| } | |||||
| nextTime := timeutil.TimeStamp(int64(t.NextExecuteTime) + t.IntervalSeconds) | |||||
| log.Debug("RunRewardTask n=%d", n) | |||||
| for i := 1; int64(i) <= n; i++ { | |||||
| log.Debug("operator.Operate i=%d n=%d", i, n) | |||||
| err = operator.Operate(&models.RewardOperateContext{ | |||||
| SourceType: models.SourceTypeRunCloudbrainTask, | |||||
| SourceId: t.OperateSerialNo, | |||||
| Reward: models.Reward{ | |||||
| Amount: t.Amount, | |||||
| Type: models.GetRewardTypeInstance(record.RewardType), | |||||
| }, | |||||
| TargetUserId: record.UserId, | |||||
| OperateType: models.GetRewardOperateTypeInstance(record.OperateType), | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("RunRewardTask.operator operate error.%v", err) | |||||
| if models.IsErrInsufficientPointsBalance(err) { | |||||
| task, err := models.GetCloudbrainByID(record.SourceId) | |||||
| if err != nil { | |||||
| log.Error("RunRewardTask GetCloudbrainByID error. %v", err) | |||||
| return err | |||||
| } | |||||
| repo.StopJobs([]*models.Cloudbrain{task}) | |||||
| models.StopPeriodicTask(task.ID, t.OperateSerialNo, time.Now()) | |||||
| return nil | |||||
| } | |||||
| return nil | |||||
| } | |||||
| models.IncrRewardTaskSuccessCount(t, 1, nextTime) | |||||
| nextTime = timeutil.TimeStamp(int64(nextTime) + t.IntervalSeconds) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func countExecuteTimes(t models.RewardPeriodicTask, now time.Time) (int64, timeutil.TimeStamp) { | |||||
| interval := t.IntervalSeconds | |||||
| nextTime := int64(t.NextExecuteTime) | |||||
| if nextTime > now.Unix() { | |||||
| return 0, 0 | |||||
| } | |||||
| diff := now.Unix() - nextTime | |||||
| var n int64 | |||||
| if diff%interval == 0 { | |||||
| n = diff / interval | |||||
| } else { | |||||
| n = diff/interval + 1 | |||||
| } | |||||
| newNextTime := timeutil.TimeStamp(nextTime + n*interval) | |||||
| return n, newNextTime | |||||
| } | |||||
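| // Example with hypothetical numbers: IntervalSeconds=60 and a NextExecuteTime 150s in the past | |||||
| // give diff=150, so n=150/60+1=3 executions are due and the new next execute time moves | |||||
| // forward by 3*60=180s; if diff were exactly 120, n would be 120/60=2. | |||||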
| @@ -0,0 +1,150 @@ | |||||
| package account | |||||
| import ( | |||||
| "bytes" | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/modules/redis/redis_lock" | |||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "code.gitea.io/gitea/modules/util" | |||||
| "encoding/json" | |||||
| "strings" | |||||
| "time" | |||||
| ) | |||||
| func GetAccount(userId int64) (*models.PointAccount, error) { | |||||
| redisKey := redis_key.PointAccountInfo(userId) | |||||
| val, _ := redis_client.Get(redisKey) | |||||
| if val != "" { | |||||
| account := &models.PointAccount{} | |||||
| json.Unmarshal([]byte(val), account) | |||||
| return account, nil | |||||
| } | |||||
| account, err := models.GetAccountByUserId(userId) | |||||
| if err != nil { | |||||
| if models.IsErrRecordNotExist(err) { | |||||
| a, err := InitAccount(userId) | |||||
| if err != nil { | |||||
| log.Error("InitAccount error,err=%v", err) | |||||
| return nil, err | |||||
| } | |||||
| return a, nil | |||||
| } | |||||
| log.Error("GetAccountByUserId error,err=%v", err) | |||||
| return nil, err | |||||
| } | |||||
| jsonStr, _ := json.Marshal(account) | |||||
| redis_client.Setex(redisKey, string(jsonStr), 24*time.Hour) | |||||
| return account, nil | |||||
| } | |||||
| func InitAccount(userId int64) (*models.PointAccount, error) { | |||||
| lock := redis_lock.NewDistributeLock(redis_key.PointAccountInitLock(userId)) | |||||
| isOk, err := lock.LockWithWait(3*time.Second, 3*time.Second) | |||||
| if err != nil { | |||||
| log.Error("PointAccountInitLock error,err=%v", err) | |||||
| return nil, err | |||||
| } | |||||
| if isOk { | |||||
| defer lock.UnLock() | |||||
| account, _ := models.GetAccountByUserId(userId) | |||||
| if account == nil { | |||||
| models.InsertAccount(&models.PointAccount{ | |||||
| Balance: 0, | |||||
| TotalEarned: 0, | |||||
| TotalConsumed: 0, | |||||
| UserId: userId, | |||||
| Status: models.PointAccountNormal, | |||||
| Version: 0, | |||||
| AccountCode: util.UUID(), | |||||
| }) | |||||
| return models.GetAccountByUserId(userId) | |||||
| } | |||||
| return account, nil | |||||
| } | |||||
| return nil, nil | |||||
| } | |||||
| //IsPointBalanceEnough check whether the user's point balance is bigger than task unit price | |||||
| func IsPointBalanceEnough(targetUserId int64, unitPrice int) bool { | |||||
| if !setting.CloudBrainPaySwitch { | |||||
| return true | |||||
| } | |||||
| if unitPrice == 0 { | |||||
| return true | |||||
| } | |||||
| a, err := GetAccount(targetUserId) | |||||
| if err != nil { | |||||
| log.Error("IsPointBalanceEnough GetAccount error,err=%v", err) | |||||
| return false | |||||
| } | |||||
| return a.Balance >= int64(unitPrice) | |||||
| } | |||||
| func SearchPointAccount(opt models.SearchPointAccountOpts) (*models.SearchPointAccountResponse, error) { | |||||
| var result = &models.SearchPointAccountResponse{ | |||||
| Records: make([]*models.UserPointAccount, 0), | |||||
| PageSize: opt.PageSize, | |||||
| Page: opt.Page, | |||||
| Total: 0, | |||||
| } | |||||
| userSearch := &models.SearchUserOptions{ | |||||
| Type: models.UserTypeIndividual, | |||||
| ListOptions: models.ListOptions{ | |||||
| PageSize: 20, | |||||
| }, | |||||
| SearchByEmail: true, | |||||
| OrderBy: models.SearchOrderByAlphabetically, | |||||
| } | |||||
| userSearch.Page = opt.Page | |||||
| if userSearch.Page <= 0 { | |||||
| userSearch.Page = 1 | |||||
| } | |||||
| userSearch.Keyword = strings.Trim(opt.Keyword, " ") | |||||
| if len(userSearch.Keyword) == 0 || isKeywordValid(userSearch.Keyword) { | |||||
| users, count, err := models.SearchUsers(userSearch) | |||||
| if err != nil { | |||||
| log.Error("SearchPointAccount SearchUsers error.%v", err) | |||||
| return nil, err | |||||
| } | |||||
| userIds := make([]int64, 0) | |||||
| for _, v := range users { | |||||
| userIds = append(userIds, v.ID) | |||||
| } | |||||
| accountMap, err := models.GetPointAccountMapByUserIds(userIds) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| records := make([]*models.UserPointAccount, 0) | |||||
| for _, v := range users { | |||||
| upa := &models.UserPointAccount{ | |||||
| UserId: v.ID, | |||||
| UserName: v.Name, | |||||
| Email: v.Email, | |||||
| Balance: 0, | |||||
| TotalEarned: 0, | |||||
| TotalConsumed: 0, | |||||
| } | |||||
| a := accountMap[v.ID] | |||||
| if a != nil { | |||||
| upa.Balance = a.Balance | |||||
| upa.TotalConsumed = a.TotalConsumed | |||||
| upa.TotalEarned = a.TotalEarned | |||||
| } | |||||
| records = append(records, upa) | |||||
| } | |||||
| result.Records = records | |||||
| result.Total = count | |||||
| } | |||||
| return result, nil | |||||
| } | |||||
| func isKeywordValid(keyword string) bool { | |||||
| return !bytes.Contains([]byte(keyword), []byte{0x00}) | |||||
| } | |||||
| @@ -0,0 +1,65 @@ | |||||
| package point | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/modules/redis/redis_lock" | |||||
| "code.gitea.io/gitea/services/reward/limiter" | |||||
| "code.gitea.io/gitea/services/reward/point/account" | |||||
| "errors" | |||||
| "time" | |||||
| ) | |||||
| type PointOperator struct { | |||||
| } | |||||
| func (operator *PointOperator) IsLimited(ctx *models.RewardOperateContext) error { | |||||
| realAmount, err := limiter.CheckLimit(ctx.SourceType.Name(), models.LimitTypeRewardPoint, ctx.TargetUserId, ctx.Reward.Amount, ctx.RejectPolicy) | |||||
| if err != nil { | |||||
| log.Error("PointOperator IsLimited error,err=%v", err) | |||||
| return err | |||||
| } | |||||
| if realAmount < ctx.Reward.Amount { | |||||
| ctx.LossAmount = ctx.Reward.Amount - realAmount | |||||
| ctx.Reward.Amount = realAmount | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (operator *PointOperator) Operate(ctx *models.RewardOperateContext) error { | |||||
| lock := redis_lock.NewDistributeLock(redis_key.PointAccountOperateLock(ctx.TargetUserId)) | |||||
| isOk, err := lock.LockWithWait(3*time.Second, 3*time.Second) | |||||
| if err != nil { | |||||
| log.Error("Get PointAccountOperateLock error,err=%v", err) | |||||
| return err | |||||
| } | |||||
| if isOk { | |||||
| defer lock.UnLock() | |||||
| na, err := account.GetAccount(ctx.TargetUserId) | |||||
| if err != nil || na == nil { | |||||
| log.Error("operator get account error error,err=%v", err) | |||||
| return errors.New("get account error") | |||||
| } | |||||
| if ctx.OperateType == models.OperateTypeIncrease { | |||||
| err = na.Increase(ctx.Reward.Amount, ctx.SourceId) | |||||
| } else if ctx.OperateType == models.OperateTypeDecrease { | |||||
| if !ctx.PermittedNegative && na.Balance < ctx.Reward.Amount { | |||||
| log.Info("account balance is not enough,ctx=%v", ctx) | |||||
| return models.ErrInsufficientPointsBalance{} | |||||
| } | |||||
| err = na.Decrease(ctx.Reward.Amount, ctx.SourceId) | |||||
| } | |||||
| if err != nil { | |||||
| log.Error("operate account balance error,err=%v", err) | |||||
| return err | |||||
| } | |||||
| redis_client.Del(redis_key.PointAccountInfo(ctx.TargetUserId)) | |||||
| } else { | |||||
| log.Error("Get account operate lock failed,ctx=%v", ctx) | |||||
| return errors.New("Get account operate lock failed") | |||||
| } | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,47 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| ) | |||||
| type RecordResponse struct { | |||||
| Records []*models.RewardOperateRecordShow | |||||
| Total int64 | |||||
| PageSize int | |||||
| Page int | |||||
| } | |||||
| func GetRewardRecordList(opts *models.RewardRecordListOpts) (*RecordResponse, error) { | |||||
| var l models.RewardRecordShowList | |||||
| var n int64 | |||||
| var err error | |||||
| if opts.IsAdmin { | |||||
| l, n, err = models.GetAdminRewardRecordShowList(opts) | |||||
| } else { | |||||
| l, n, err = models.GetRewardRecordShowList(opts) | |||||
| } | |||||
| if err != nil { | |||||
| log.Error("GetRewardRecordList error. %v", err) | |||||
| return nil, err | |||||
| } | |||||
| if len(l) == 0 { | |||||
| return &RecordResponse{Records: make([]*models.RewardOperateRecordShow, 0), Total: n, Page: opts.Page, PageSize: opts.PageSize}, nil | |||||
| } | |||||
| return &RecordResponse{Records: l, Total: n, Page: opts.Page, PageSize: opts.PageSize}, nil | |||||
| } | |||||
| func handleRecordResponse(opts *models.RewardRecordListOpts, list models.RewardRecordShowList) { | |||||
| if opts.IsAdmin { | |||||
| for _, v := range list { | |||||
| v.UserName = opts.UserName | |||||
| } | |||||
| } else { | |||||
| for _, v := range list { | |||||
| if v.Cloudbrain != nil { | |||||
| v.Cloudbrain.AiCenter = "" | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -0,0 +1,24 @@ | |||||
| package reward | |||||
| import ( | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "fmt" | |||||
| "math/rand" | |||||
| "time" | |||||
| ) | |||||
| func GetSerialNoByRedis() (string, error) { | |||||
| now := time.Now() | |||||
| r := int64(rand.Intn(4)) + 1 | |||||
| n, err := redis_client.IncrBy(redis_key.RewardSerialCounter(now), r) | |||||
| if err != nil { | |||||
| log.Error("GetSerialNoByRedis RewardSerialCounter error. %v", err) | |||||
| return "", err | |||||
| } | |||||
| if n == r { | |||||
| redis_client.Expire(redis_key.RewardSerialCounter(now), 2*time.Minute) | |||||
| } | |||||
| return now.Format("200601021504") + fmt.Sprintf("%03d", n) + fmt.Sprint(rand.Intn(10)), nil | |||||
| } | |||||
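| // The serial number is minute-level time + a zero-padded counter + one random digit, | |||||
| // e.g. a call at 2022-07-01 15:04 with counter value 7 might yield "202207011504" + "007" + "3". | |||||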
| @@ -0,0 +1,50 @@ | |||||
| package period | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "errors" | |||||
| "time" | |||||
| ) | |||||
| var PeriodHandlerMap = map[string]PeriodHandler{ | |||||
| models.PeriodNotCycle: new(NoCycleHandler), | |||||
| models.PeriodDaily: new(DailyHandler), | |||||
| } | |||||
| type PeriodHandler interface { | |||||
| GetCurrentPeriod() *models.PeriodResult | |||||
| } | |||||
| type NoCycleHandler struct { | |||||
| } | |||||
| func (l *NoCycleHandler) GetCurrentPeriod() *models.PeriodResult { | |||||
| return nil | |||||
| } | |||||
| type DailyHandler struct { | |||||
| } | |||||
| func (l *DailyHandler) GetCurrentPeriod() *models.PeriodResult { | |||||
| t := time.Now() | |||||
| startTime := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) | |||||
| endTime := startTime.Add(24 * time.Hour) | |||||
| leftTime := endTime.Sub(t) | |||||
| return &models.PeriodResult{ | |||||
| StartTime: startTime, | |||||
| EndTime: endTime, | |||||
| LeftTime: leftTime, | |||||
| } | |||||
| } | |||||
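| // For example, a call at 18:00 local time yields a period from 00:00 today to 00:00 tomorrow | |||||
| // with a LeftTime of 6 hours, which the limiter uses as the redis key expiration. | |||||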
| func getPeriodHandler(refreshRateType string) PeriodHandler { | |||||
| return PeriodHandlerMap[refreshRateType] | |||||
| } | |||||
| func GetPeriod(refreshRate string) (*models.PeriodResult, error) { | |||||
| handler := getPeriodHandler(refreshRate) | |||||
| if handler == nil { | |||||
| return nil, errors.New("task config incorrect") | |||||
| } | |||||
| return handler.GetCurrentPeriod(), nil | |||||
| } | |||||
| @@ -0,0 +1,111 @@ | |||||
| package task | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/services/reward" | |||||
| "code.gitea.io/gitea/services/reward/limiter" | |||||
| "fmt" | |||||
| ) | |||||
| func Accomplish(action models.Action) { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| action.OpType = models.GetTaskOptType(action) | |||||
| switch action.OpType { | |||||
| //only creating public repo can be rewarded | |||||
| case models.ActionCreateRepo: | |||||
| if action.Repo.IsPrivate { | |||||
| return | |||||
| } | |||||
| //only creating public image can be rewarded | |||||
| case models.ActionCreateImage: | |||||
| if action.IsPrivate { | |||||
| return | |||||
| } | |||||
| case models.ActionBindWechat: | |||||
| n, err := models.CountWechatBindLog(action.Content, models.WECHAT_BIND) | |||||
| if err != nil { | |||||
| log.Error("CountWechatBindLog error when accomplish task,err=%v", err) | |||||
| return | |||||
| } | |||||
| //if wechatOpenId has been bound before,the action can not get reward | |||||
| if n > 1 { | |||||
| log.Debug("the wechat account has been bound before,wechatOpenId = %s", action.Content) | |||||
| return | |||||
| } | |||||
| } | |||||
| go accomplish(action) | |||||
| } | |||||
| func accomplish(action models.Action) error { | |||||
| defer func() { | |||||
| if err := recover(); err != nil { | |||||
| combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2)) | |||||
| log.Error("PANIC:%v", combinedErr) | |||||
| } | |||||
| }() | |||||
| userId := action.ActUserID | |||||
| taskType := fmt.Sprint(action.OpType) | |||||
| //get task config | |||||
| config, err := GetTaskConfig(taskType) | |||||
| if err != nil { | |||||
| log.Error("GetTaskConfig error,%v", err) | |||||
| return err | |||||
| } | |||||
| if config == nil { | |||||
| log.Info("task config not exist,userId=%d taskType=%s", userId, taskType) | |||||
| return nil | |||||
| } | |||||
| //is limited? | |||||
| if isLimited(userId, config, models.JustReject) { | |||||
| log.Info("task accomplish maximum times are reached,userId=%d taskType=%s", userId, taskType) | |||||
| return nil | |||||
| } | |||||
| //add log | |||||
| _, err = models.InsertTaskAccomplishLog(&models.TaskAccomplishLog{ | |||||
| ConfigId: config.ID, | |||||
| TaskCode: config.TaskCode, | |||||
| UserId: userId, | |||||
| ActionId: action.ID, | |||||
| }) | |||||
| if err != nil { | |||||
| log.Error("InsertTaskAccomplishLog error,%v", err) | |||||
| return err | |||||
| } | |||||
| //reward | |||||
| reward.Operate(&models.RewardOperateContext{ | |||||
| SourceType: models.SourceTypeAccomplishTask, | |||||
| SourceId: fmt.Sprint(action.ID), | |||||
| SourceTemplateId: fmt.Sprint(action.OpType), | |||||
| Title: config.Title, | |||||
| Reward: models.Reward{ | |||||
| Amount: config.AwardAmount, | |||||
| Type: models.GetRewardTypeInstance(config.AwardType), | |||||
| }, | |||||
| TargetUserId: userId, | |||||
| RequestId: fmt.Sprint(action.ID), | |||||
| OperateType: models.OperateTypeIncrease, | |||||
| RejectPolicy: models.FillUp, | |||||
| }) | |||||
| log.Debug("accomplish success,action=%v", action) | |||||
| return nil | |||||
| } | |||||
| func isLimited(userId int64, config *models.TaskConfig, rejectPolicy models.LimiterRejectPolicy) bool { | |||||
| if _, err := limiter.CheckLimit(config.TaskCode, models.LimitTypeTask, userId, 1, rejectPolicy); err != nil { | |||||
| log.Error(" isLimited CheckLimit error. %v", err) | |||||
| return true | |||||
| } | |||||
| return false | |||||
| } | |||||
| @@ -0,0 +1,183 @@ | |||||
| package task | |||||
| import ( | |||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | |||||
| "code.gitea.io/gitea/modules/redis/redis_key" | |||||
| "code.gitea.io/gitea/modules/redis/redis_lock" | |||||
| "encoding/json" | |||||
| "errors" | |||||
| "time" | |||||
| ) | |||||
| //GetTaskConfig gets the task config from the redis cache first; | |||||
| //if it is not in redis, it is loaded from the database and the redis key is refreshed | |||||
| func GetTaskConfig(taskType string) (*models.TaskConfig, error) { | |||||
| list, err := GetTaskConfigList() | |||||
| if err != nil { | |||||
| log.Error(" GetTaskConfigList error. %v", err) | |||||
| return nil, err | |||||
| } | |||||
| for _, v := range list { | |||||
| if v.TaskCode == taskType { | |||||
| return v, nil | |||||
| } | |||||
| } | |||||
| return nil, nil | |||||
| } | |||||
| func GetTaskConfigList() ([]*models.TaskConfig, error) { | |||||
| redisKey := redis_key.TaskConfigList() | |||||
| configStr, _ := redis_client.Get(redisKey) | |||||
| if configStr != "" { | |||||
| if configStr == redis_key.EMPTY_REDIS_VAL { | |||||
| return nil, nil | |||||
| } | |||||
| config := make([]*models.TaskConfig, 0) | |||||
| json.Unmarshal([]byte(configStr), &config) | |||||
| return config, nil | |||||
| } | |||||
| config, err := models.GetTaskConfigList() | |||||
| if err != nil { | |||||
| log.Error(" GetTaskConfigList from model error. %v", err) | |||||
| if models.IsErrRecordNotExist(err) { | |||||
| redis_client.Setex(redisKey, redis_key.EMPTY_REDIS_VAL, 5*time.Second) | |||||
| return nil, nil | |||||
| } | |||||
| return nil, err | |||||
| } | |||||
| jsonStr, _ := json.Marshal(config) | |||||
| redis_client.Setex(redisKey, string(jsonStr), 30*24*time.Hour) | |||||
| return config, nil | |||||
| } | |||||
| func GetTaskConfigPageWithDeleted(opt models.GetTaskConfigOpts) ([]*models.TaskAndLimiterConfig, int64, error) { | |||||
| config, count, err := models.GetTaskConfigPageWithDeleted(opt) | |||||
| if err != nil { | |||||
| log.Error(" GetTaskConfigPageWithDeleted from model error. %v", err) | |||||
| if models.IsErrRecordNotExist(err) { | |||||
| return nil, 0, nil | |||||
| } | |||||
| return nil, 0, err | |||||
| } | |||||
| return config, count, nil | |||||
| } | |||||
| func GetTaskConfigWithLimitList(opt models.GetTaskConfigOpts) (*models.TaskConfigWithLimitResponse, error) { | |||||
| list, n, err := GetTaskConfigPageWithDeleted(opt) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if len(list) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| r := make([]*models.TaskConfigWithSingleLimit, 0) | |||||
| for i := 0; i < len(list); i++ { | |||||
| li := list[i] | |||||
| t := &models.TaskConfigWithSingleLimit{ | |||||
| ID: li.TaskConfig.ID, | |||||
| TaskCode: li.TaskConfig.TaskCode, | |||||
| AwardType: li.TaskConfig.AwardType, | |||||
| AwardAmount: li.TaskConfig.AwardAmount, | |||||
| Creator: li.TaskConfig.CreatorName, | |||||
| CreatedUnix: li.TaskConfig.CreatedUnix, | |||||
| IsDeleted: li.TaskConfig.DeletedAt > 0, | |||||
| DeleteAt: li.TaskConfig.DeletedAt, | |||||
| LimitNum: li.LimitConfig.LimitNum, | |||||
| RefreshRate: li.LimitConfig.RefreshRate, | |||||
| } | |||||
| r = append(r, t) | |||||
| } | |||||
| return &models.TaskConfigWithLimitResponse{ | |||||
| Records: r, | |||||
| Page: opt.Page, | |||||
| PageSize: opt.PageSize, | |||||
| Total: n, | |||||
| }, nil | |||||
| } | |||||
| func AddTaskConfig(config models.TaskConfigWithLimit, doer *models.User) error { | |||||
| if config.TaskCode == "" || config.AwardType == "" { | |||||
| log.Error(" EditTaskConfig param error") | |||||
| return errors.New("param error") | |||||
| } | |||||
| var lock = redis_lock.NewDistributeLock(redis_key.TaskConfigOperateLock(config.TaskCode, config.AwardType)) | |||||
| isOk, _ := lock.LockWithWait(3*time.Second, 3*time.Second) | |||||
| if !isOk { | |||||
| return errors.New("Get lock failed") | |||||
| } | |||||
| defer lock.UnLock() | |||||
| t, err := models.GetTaskConfigByTaskCode(config.TaskCode) | |||||
| if err != nil && !models.IsErrRecordNotExist(err) { | |||||
| return err | |||||
| } | |||||
| if t != nil { | |||||
| return errors.New("task config is exist") | |||||
| } | |||||
| for i, l := range config.Limiters { | |||||
| if l.Scope == "" { | |||||
| config.Limiters[i].Scope = models.LimitScopeSingleUser.Name() | |||||
| } | |||||
| } | |||||
| err = models.NewTaskConfig(config, doer) | |||||
| if err != nil { | |||||
| log.Error("add task config error,config:%v err:%v", config, err) | |||||
| return err | |||||
| } | |||||
| redis_client.Del(redis_key.LimitConfig(models.LimitTypeTask.Name())) | |||||
| redis_client.Del(redis_key.TaskConfigList()) | |||||
| return nil | |||||
| } | |||||
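| //EditTaskConfig updates an existing task config under a distributed lock and clears the related redis caches on success | |||||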
| func EditTaskConfig(config models.TaskConfigWithLimit, doer *models.User) error { | |||||
| if config.TaskCode == "" || config.AwardType == "" || config.ID <= 0 { | |||||
| log.Error(" EditTaskConfig param error") | |||||
| return errors.New("param error") | |||||
| } | |||||
| var lock = redis_lock.NewDistributeLock(redis_key.TaskConfigOperateLock(config.TaskCode, config.AwardType)) | |||||
| isOk, _ := lock.LockWithWait(3*time.Second, 3*time.Second) | |||||
| if !isOk { | |||||
| return errors.New("Get lock failed") | |||||
| } | |||||
| defer lock.UnLock() | |||||
| t, err := models.GetTaskConfigByID(config.ID) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if t == nil { | |||||
| return errors.New("task config is not exist") | |||||
| } | |||||
| for i, l := range config.Limiters { | |||||
| if l.Scope == "" { | |||||
| config.Limiters[i].Scope = models.LimitScopeSingleUser.Name() | |||||
| } | |||||
| } | |||||
| err = models.EditTaskConfig(config, doer) | |||||
| if err != nil { | |||||
| log.Error("add task config error,config:%v err:%v", config, err) | |||||
| return err | |||||
| } | |||||
| redis_client.Del(redis_key.LimitConfig(models.LimitTypeTask.Name())) | |||||
| redis_client.Del(redis_key.TaskConfigList()) | |||||
| return nil | |||||
| } | |||||
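| //DelTaskConfig deletes the task config with the given id and clears the related redis caches | |||||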
| func DelTaskConfig(id int64, doer *models.User) error { | |||||
| if id <= 0 { | |||||
| log.Error(" EditTaskConfig param error") | |||||
| return errors.New("param error") | |||||
| } | |||||
| err := models.DelTaskConfig(id, doer) | |||||
| if err != nil { | |||||
| log.Error("del task config error,err:%v", err) | |||||
| return err | |||||
| } | |||||
| redis_client.Del(redis_key.LimitConfig(models.LimitTypeTask.Name())) | |||||
| redis_client.Del(redis_key.TaskConfigList()) | |||||
| return nil | |||||
| } | |||||
| @@ -1,6 +1,9 @@ | |||||
| package wechat | package wechat | ||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/models" | |||||
| "code.gitea.io/gitea/modules/auth/wechat" | |||||
| "code.gitea.io/gitea/modules/notification" | |||||
| "code.gitea.io/gitea/modules/redis/redis_client" | "code.gitea.io/gitea/modules/redis/redis_client" | ||||
| "code.gitea.io/gitea/modules/redis/redis_key" | "code.gitea.io/gitea/modules/redis/redis_key" | ||||
| "encoding/json" | "encoding/json" | ||||
| @@ -142,22 +145,26 @@ func HandleScanEvent(we WechatMsg) string { | |||||
| if val == "" { | if val == "" { | ||||
| return "" | return "" | ||||
| } | } | ||||
| qrCache := new(QRCode4BindCache) | |||||
| qrCache := new(wechat.QRCode4BindCache) | |||||
| json.Unmarshal([]byte(val), qrCache) | json.Unmarshal([]byte(val), qrCache) | ||||
| if qrCache.Status == BIND_STATUS_UNBIND { | |||||
| err := BindWechat(qrCache.UserId, we.FromUserName) | |||||
| if qrCache.Status == wechat.BIND_STATUS_UNBIND { | |||||
| err := wechat.BindWechat(qrCache.UserId, we.FromUserName) | |||||
| if err != nil { | if err != nil { | ||||
| if err, ok := err.(WechatBindError); ok { | |||||
| if err, ok := err.(wechat.WechatBindError); ok { | |||||
| return err.Reply | return err.Reply | ||||
| } | } | ||||
| return BIND_REPLY_FAILED_DEFAULT | |||||
| return wechat.BIND_REPLY_FAILED_DEFAULT | |||||
| } | } | ||||
| qrCache.Status = BIND_STATUS_BOUND | |||||
| qrCache.Status = wechat.BIND_STATUS_BOUND | |||||
| jsonStr, _ := json.Marshal(qrCache) | jsonStr, _ := json.Marshal(qrCache) | ||||
| redis_client.Setex(redis_key.WechatBindingUserIdKey(sceneStr), string(jsonStr), 60*time.Second) | redis_client.Setex(redis_key.WechatBindingUserIdKey(sceneStr), string(jsonStr), 60*time.Second) | ||||
| } | } | ||||
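| //notify the bound user through the registered notifiers | |||||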
| u, err := models.GetUserByID(qrCache.UserId) | |||||
| if err == nil { | |||||
| notification.NotifyWechatBind(u, we.FromUserName) | |||||
| } | |||||
| return BIND_REPLY_SUCCESS | |||||
| return wechat.BIND_REPLY_SUCCESS | |||||
| } | } | ||||
| func HandleSubscribeEvent(we WechatMsg) *WechatReplyContent { | func HandleSubscribeEvent(we WechatMsg) *WechatReplyContent { | ||||
| @@ -1,4 +1,5 @@ | |||||
| {{template "base/head" .}} | {{template "base/head" .}} | ||||
| <script src="{{StaticUrlPrefix}}/js/specsuse.js?v={{MD5 AppVer}}" type="text/javascript"></script> | |||||
| <!-- 弹窗 --> | <!-- 弹窗 --> | ||||
| <div id="mask"> | <div id="mask"> | ||||
| <div id="loadingPage"> | <div id="loadingPage"> | ||||
| @@ -175,10 +176,17 @@ | |||||
| </div> | </div> | ||||
| <!-- XPU类型 --> | <!-- XPU类型 --> | ||||
| <div class="one wide column text center nowrap" style="width:8% !important;"> | <div class="one wide column text center nowrap" style="width:8% !important;"> | ||||
| <span style="font-size: 12px;" title="{{.CardType}}"> | |||||
| {{if .CardType}}{{.CardType}}{{else}}--{{end}} | |||||
| </span> | |||||
| </div> | |||||
| <span style="font-size: 12px;" title="" class="card_type_{{.DisplayJobName}}_{{$JobID}}"></span> | |||||
| </div> | |||||
| <script> | |||||
| (function(){ | |||||
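| // resolve the accelerator card type from the job spec; ACC_CARD_TYPE and getListValueWithKey are presumably provided by specsuse.js included above | |||||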
| var spec = {{.Spec}} || {}; | |||||
| var cardType = getListValueWithKey(ACC_CARD_TYPE, spec.AccCardType) || '--'; | |||||
| var spanEl = document.querySelector('.card_type_{{.DisplayJobName}}_{{$JobID}}'); | |||||
| spanEl.setAttribute('title', cardType); | |||||
| spanEl.innerText = cardType; | |||||
| })(); | |||||
| </script> | |||||
| <!-- 创建者 --> | <!-- 创建者 --> | ||||
| <div class="one wide column text center nowrap" style="width:4% !important;"> | <div class="one wide column text center nowrap" style="width:4% !important;"> | ||||
| {{if .User.Name}} | {{if .User.Name}} | ||||
| @@ -4,7 +4,7 @@ | |||||
| {{template "admin/navbar" .}} | {{template "admin/navbar" .}} | ||||
| <div class="ui container"> | <div class="ui container"> | ||||
| <div id="__vue-root"></div> | <div id="__vue-root"></div> | ||||
| </duv> | |||||
| </div> | |||||
| </div> | </div> | ||||
| <script src="{{StaticUrlPrefix}}/js/vp-resources-queue.js?v={{MD5 AppVer}}"></script> | <script src="{{StaticUrlPrefix}}/js/vp-resources-queue.js?v={{MD5 AppVer}}"></script> | ||||
| {{template "base/footer" .}} | {{template "base/footer" .}} | ||||
| @@ -4,7 +4,7 @@ | |||||
| {{template "admin/navbar" .}} | {{template "admin/navbar" .}} | ||||
| <div class="ui container"> | <div class="ui container"> | ||||
| <div id="__vue-root"></div> | <div id="__vue-root"></div> | ||||
| </duv> | |||||
| </div> | |||||
| </div> | </div> | ||||
| <script src="{{StaticUrlPrefix}}/js/vp-resources-scene.js?v={{MD5 AppVer}}"></script> | <script src="{{StaticUrlPrefix}}/js/vp-resources-scene.js?v={{MD5 AppVer}}"></script> | ||||
| {{template "base/footer" .}} | {{template "base/footer" .}} | ||||