Reviewed-on: https://git.openi.org.cn/OpenI/aiforge/pulls/1234 (tags/v1.21.12.2)
| @@ -46,3 +46,6 @@ | |||
| - 点击[这里](https://git.openi.org.cn/OpenI/aiforge/issues)在线提交问题(点击页面右上角绿色按钮**创建任务**) | |||
| - 加入微信群实时交流,获得进一步的支持 | |||
| <img src="https://git.openi.org.cn/OpenI/aiforge/wiki/raw/img/wechatgroup.jpg" width=200px /> | |||
| ## 启智社区小白训练营: | |||
| - 结合案例给大家详细讲解如何使用社区平台,帮助无技术背景的小白成长为启智社区达人 (https://git.openi.org.cn/zeizei/OpenI_Learning) | |||
| @@ -56,6 +56,7 @@ require ( | |||
| github.com/gomodule/redigo v2.0.0+incompatible | |||
| github.com/google/go-github/v24 v24.0.1 | |||
| github.com/gorilla/context v1.1.1 | |||
| github.com/gorilla/websocket v1.4.0 | |||
| github.com/hashicorp/go-retryablehttp v0.6.6 // indirect | |||
| github.com/huandu/xstrings v1.3.0 | |||
| github.com/issue9/assert v1.3.2 // indirect | |||
| @@ -394,6 +394,7 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ | |||
| github.com/gorilla/sessions v1.1.1/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= | |||
| github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= | |||
| github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= | |||
| github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= | |||
| github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= | |||
| github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= | |||
| github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= | |||
| @@ -439,6 +439,19 @@ func GetModelArtsUserAttachments(userID int64) ([]*AttachmentUsername, error) { | |||
| return getModelArtsUserAttachments(x, userID) | |||
| } | |||
| func getModelArtsTrainAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) { | |||
| attachments := make([]*AttachmentUsername, 0, 10) | |||
| if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+ | |||
| "= `user`.id").Where("attachment.type = ? and (uploader_id= ? or is_private = ?) and attachment.decompress_state = ?", TypeCloudBrainTwo, userID, false, DecompressStateDone).Find(&attachments); err != nil { | |||
| return nil, err | |||
| } | |||
| return attachments, nil | |||
| } | |||
| func GetModelArtsTrainAttachments(userID int64) ([]*AttachmentUsername, error) { | |||
| return getModelArtsTrainAttachments(x, userID) | |||
| } | |||
| func CanDelAttachment(isSigned bool, user *User, attach *Attachment) bool { | |||
| if !isSigned { | |||
| return false | |||
| @@ -19,6 +19,9 @@ type JobType string | |||
| type ModelArtsJobStatus string | |||
| const ( | |||
| NPUResource = "NPU" | |||
| GPUResource = "CPU/GPU" | |||
| JobWaiting CloudbrainStatus = "WAITING" | |||
| JobStopped CloudbrainStatus = "STOPPED" | |||
| JobSucceeded CloudbrainStatus = "SUCCEEDED" | |||
| @@ -88,6 +91,9 @@ type Cloudbrain struct { | |||
| UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` | |||
| Duration int64 | |||
| TrainJobDuration string | |||
| Image string //GPU镜像名称 | |||
| GpuQueue string //GPU类型即GPU队列 | |||
| ResourceSpecId int //GPU规格id | |||
| DeletedAt time.Time `xorm:"deleted"` | |||
| CanDebug bool `xorm:"-"` | |||
| CanDel bool `xorm:"-"` | |||
| @@ -204,6 +210,7 @@ type CloudbrainsOptions struct { | |||
| JobType string | |||
| VersionName string | |||
| IsLatestVersion string | |||
| JobTypeNot bool | |||
| } | |||
| type TaskPod struct { | |||
| @@ -888,9 +895,15 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { | |||
| } | |||
| if (opts.JobType) != "" { | |||
| cond = cond.And( | |||
| builder.Eq{"cloudbrain.job_type": opts.JobType}, | |||
| ) | |||
| if opts.JobTypeNot { | |||
| cond = cond.And( | |||
| builder.Neq{"cloudbrain.job_type": opts.JobType}, | |||
| ) | |||
| } else { | |||
| cond = cond.And( | |||
| builder.Eq{"cloudbrain.job_type": opts.JobType}, | |||
| ) | |||
| } | |||
| } | |||
| if (opts.IsLatestVersion) != "" { | |||
| @@ -1999,3 +1999,16 @@ func IsErrJobNotExist(err error) bool { | |||
| func (err ErrJobNotExist) Error() string { | |||
| return fmt.Sprintf("the job does not exist") | |||
| } | |||
| type ErrTagNotExist struct { | |||
| TagID int64 | |||
| } | |||
| func (err ErrTagNotExist) Error() string { | |||
| return fmt.Sprintf("the tag does not exist") | |||
| } | |||
| func IsErrTagNotExist(err error) bool { | |||
| _, ok := err.(ErrTagNotExist) | |||
| return ok | |||
| } | |||
| @@ -1397,6 +1397,8 @@ func getIssueStatsChunk(opts *IssueStatsOptions, issueIDs []int64) (*IssueStats, | |||
| if opts.MilestoneID > 0 { | |||
| sess.And("issue.milestone_id = ?", opts.MilestoneID) | |||
| } else if opts.MilestoneID == -1 { //only search for issues do not have milestone | |||
| sess.And("issue.milestone_id = ?", 0) | |||
| } | |||
| if opts.AssigneeID > 0 { | |||
| @@ -353,7 +353,7 @@ func GetMilestonesByRepoID(repoID int64, state api.StateType, listOptions ListOp | |||
| } | |||
| miles := make([]*Milestone, 0, listOptions.PageSize) | |||
| return miles, sess.Asc("deadline_unix").Asc("id").Find(&miles) | |||
| return miles, sess.Desc("id").Find(&miles) | |||
| } | |||
| // GetMilestones returns a list of milestones of given repository and status. | |||
| @@ -134,6 +134,8 @@ func init() { | |||
| new(BlockChain), | |||
| new(RecommendOrg), | |||
| new(AiModelManage), | |||
| new(OfficialTag), | |||
| new(OfficialTagRepos), | |||
| ) | |||
| tablesStatistic = append(tablesStatistic, | |||
| @@ -2470,6 +2470,12 @@ func GetBlockChainUnSuccessRepos() ([]*Repository, error) { | |||
| Find(&repos) | |||
| } | |||
| func (repo *Repository) UpdateBlockChain() error { | |||
| _, err := x.Exec("UPDATE `repository` SET block_chain_status = ?, contract_address=? WHERE id = ?", repo.BlockChainStatus, repo.ContractAddress, repo.ID) | |||
| return err | |||
| } | |||
| func (repo *Repository) IncreaseCloneCnt() { | |||
| sess := x.NewSession() | |||
| defer sess.Close() | |||
| @@ -0,0 +1,163 @@ | |||
| package models | |||
| import ( | |||
| "code.gitea.io/gitea/modules/log" | |||
| "code.gitea.io/gitea/modules/timeutil" | |||
| "fmt" | |||
| ) | |||
// OfficialTag is a platform-managed tag that organizations can attach to
// their repositories.
// Status 0 means the tag is enabled (GetAllOfficialTags filters on status = 0).
// Limit is presumably the maximum number of repositories allowed per tag,
// with -1 meaning unlimited — TODO confirm against the callers that read it.
type OfficialTag struct {
	ID          int64              `xorm:"pk autoincr"`
	Name        string             `xorm:"NOT NULL"`
	Code        string             `xorm:"NOT NULL"`
	Limit       int                `xorm:"NOT NULL default(-1)"`
	Status      int                `xorm:"NOT NULL default(0)"`
	CreatedUnix timeutil.TimeStamp `xorm:"created"` // set by xorm on insert
	UpdatedUnix timeutil.TimeStamp `xorm:"updated"` // set by xorm on update
}
// OfficialTagRepos binds one repository to one official tag within an
// organization; one row per tagged repository (see UpdateTagReposByID).
type OfficialTagRepos struct {
	ID          int64              `xorm:"pk autoincr"`
	OrgID       int64              `xorm:"NOT NULL INDEX"`
	TagID       int64              `xorm:"NOT NULL"`
	RepoID      int64              `xorm:"NOT NULL INDEX"`
	CreatedUnix timeutil.TimeStamp `xorm:"created"` // set by xorm on insert
	UpdatedUnix timeutil.TimeStamp `xorm:"updated"` // set by xorm on update
}
// TagReposBrief is the raw row shape of the GetTagRepos query: repository
// id/name plus the LEFT-joined official_tag_repos id, which is 0 when the
// repository does not carry the tag.
type TagReposBrief struct {
	RepoID   int64
	RepoName string
	TagID    int64
}

// TagReposSelected is one entry of the GetTagRepos result: a repository and
// whether it currently carries the queried tag.
type TagReposSelected struct {
	RepoID   int64
	RepoName string
	Selected bool
}

// TagsDetail aggregates one official tag with the repositories bound to it
// (built by GetAllOfficialTagRepos).
type TagsDetail struct {
	TagId    int64
	TagName  string
	TagLimit int
	RepoList []Repository
}
| func GetTagByID(id int64) (*OfficialTag, error) { | |||
| r := &OfficialTag{ | |||
| ID: id, | |||
| } | |||
| has, err := x.Get(r) | |||
| if err != nil { | |||
| return nil, err | |||
| } else if !has { | |||
| return nil, ErrTagNotExist{0} | |||
| } | |||
| return r, nil | |||
| } | |||
| func UpdateTagReposByID(tagID, orgID int64, repoIdList []int64) error { | |||
| sess := x.NewSession() | |||
| defer sess.Close() | |||
| if err := sess.Begin(); err != nil { | |||
| return fmt.Errorf("UpdateTagReposByID[tagId: %d, orgID: %d,error:%v", tagID, orgID, err) | |||
| } | |||
| //delete old tag repos | |||
| r := &OfficialTagRepos{ | |||
| TagID: tagID, | |||
| OrgID: orgID, | |||
| } | |||
| _, err := sess.Delete(r) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if len(repoIdList) == 0 { | |||
| return sess.Commit() | |||
| } | |||
| //add new tag repos | |||
| data := make([]*OfficialTagRepos, 0) | |||
| for _, repoId := range repoIdList { | |||
| data = append(data, &OfficialTagRepos{ | |||
| OrgID: orgID, | |||
| TagID: tagID, | |||
| RepoID: repoId, | |||
| }) | |||
| } | |||
| _, err = sess.Insert(&data) | |||
| if err != nil { | |||
| sess.Rollback() | |||
| return err | |||
| } | |||
| return sess.Commit() | |||
| } | |||
| func GetTagRepos(tagID, orgID int64) ([]TagReposSelected, error) { | |||
| t := make([]TagReposBrief, 0) | |||
| const SQLCmd = "select t1.id as repo_id,t1.name as repo_name,t2.id as tag_id from repository t1 left join official_tag_repos t2 on (t1.id = t2.repo_id and t2.tag_id = ?) where t1.owner_id = ? and t1.is_private = false order by t1.updated_unix desc" | |||
| if err := x.SQL(SQLCmd, tagID, orgID).Find(&t); err != nil { | |||
| return nil, err | |||
| } | |||
| r := make([]TagReposSelected, 0) | |||
| for _, v := range t { | |||
| selected := false | |||
| if v.TagID > 0 { | |||
| selected = true | |||
| } | |||
| r = append(r, TagReposSelected{ | |||
| RepoID: v.RepoID, | |||
| RepoName: v.RepoName, | |||
| Selected: selected, | |||
| }) | |||
| } | |||
| return r, nil | |||
| } | |||
| func GetAllOfficialTagRepos(orgID int64, isOwner bool) ([]TagsDetail, error) { | |||
| result := make([]TagsDetail, 0) | |||
| tags, err := GetAllOfficialTags() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| for _, tag := range tags { | |||
| repos, err := GetOfficialTagDetail(orgID, tag.ID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if len(repos) == 0 && !isOwner { | |||
| continue | |||
| } | |||
| result = append(result, TagsDetail{ | |||
| TagId: tag.ID, | |||
| TagName: tag.Name, | |||
| TagLimit: tag.Limit, | |||
| RepoList: repos, | |||
| }) | |||
| } | |||
| return result, nil | |||
| } | |||
| func GetOfficialTagDetail(orgID, tagId int64) ([]Repository, error) { | |||
| t := make([]Repository, 0) | |||
| const SQLCmd = "select t2.* from official_tag_repos t1 inner join repository t2 on t1.repo_id = t2.id where t1.org_id = ? and t1.tag_id=? order by t2.updated_unix desc" | |||
| if err := x.SQL(SQLCmd, orgID, tagId).Find(&t); err != nil { | |||
| return nil, err | |||
| } | |||
| return t, nil | |||
| } | |||
| func GetAllOfficialTags() ([]OfficialTag, error) { | |||
| //todo redis? | |||
| o := make([]OfficialTag, 0) | |||
| err := x.Where("status = ?", 0).OrderBy("updated_unix desc").Find(&o) | |||
| if err != nil { | |||
| log.Error("GetAllOfficialTags error,%v", err) | |||
| return nil, err | |||
| } | |||
| return o, nil | |||
| } | |||
| @@ -24,6 +24,8 @@ const ( | |||
| RepoWatchModeAuto // 3 | |||
| ) | |||
| var ActionChan = make(chan *Action, 200) | |||
| // Watch is connection request for receiving repository notification. | |||
| type Watch struct { | |||
| ID int64 `xorm:"pk autoincr"` | |||
| @@ -277,9 +279,17 @@ func notifyWatchers(e Engine, actions ...*Action) error { | |||
| // NotifyWatchers creates batch of actions for every watcher. | |||
| func NotifyWatchers(actions ...*Action) error { | |||
| producer(actions...) | |||
| return notifyWatchers(x, actions...) | |||
| } | |||
| func producer(actions ...*Action) { | |||
| for _, action := range actions { | |||
| ActionChan <- action | |||
| } | |||
| } | |||
| // NotifyWatchersActions creates batch of actions for every watcher. | |||
| func NotifyWatchersActions(acts []*Action) error { | |||
| sess := x.NewSession() | |||
| @@ -1,7 +1,6 @@ | |||
| package models | |||
| import ( | |||
| "encoding/json" | |||
| "fmt" | |||
| "sort" | |||
| "strconv" | |||
| @@ -202,15 +201,7 @@ func QueryUserStaticDataAll(opts *UserBusinessAnalysisQueryOptions) ([]*UserBusi | |||
| return nil, 0 | |||
| } | |||
| log.Info("query return total:" + fmt.Sprint(allCount)) | |||
| if allCount == 0 { | |||
| CommitCodeSizeMap, err := GetAllUserKPIStats() | |||
| if err != nil { | |||
| log.Info("query commit code errr.") | |||
| } else { | |||
| log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) | |||
| } | |||
| RefreshUserStaticAllTabel(make(map[string]int), CommitCodeSizeMap) | |||
| } | |||
| pageSize := 1000 | |||
| totalPage := int(allCount) / pageSize | |||
| userBusinessAnalysisReturnList := UserBusinessAnalysisAllList{} | |||
| @@ -370,7 +361,7 @@ func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap ma | |||
| CodeMergeCountMap := queryPullRequest(start_unix, end_unix) | |||
| CommitCountMap := queryCommitAction(start_unix, end_unix, 5) | |||
| IssueCountMap := queryAction(start_unix, end_unix, 6) | |||
| IssueCountMap := queryCreateIssue(start_unix, end_unix) | |||
| CommentCountMap := queryComment(start_unix, end_unix) | |||
| FocusRepoCountMap := queryWatch(start_unix, end_unix) | |||
| @@ -395,7 +386,7 @@ func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap ma | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("`user`.*").Table("user").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| userList := make([]*User, 0) | |||
| sess.Find(&userList) | |||
| for i, userRecord := range userList { | |||
| @@ -528,7 +519,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, | |||
| DataDate := startTime.Format("2006-01-02") | |||
| CodeMergeCountMap := queryPullRequest(start_unix, end_unix) | |||
| CommitCountMap := queryCommitAction(start_unix, end_unix, 5) | |||
| IssueCountMap := queryAction(start_unix, end_unix, 6) | |||
| IssueCountMap := queryCreateIssue(start_unix, end_unix) | |||
| CommentCountMap := queryComment(start_unix, end_unix) | |||
| FocusRepoCountMap := queryWatch(start_unix, end_unix) | |||
| @@ -559,7 +550,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("`user`.*").Table("user").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| userList := make([]*User, 0) | |||
| sess.Find(&userList) | |||
| @@ -709,7 +700,7 @@ func querySolveIssue(start_unix int64, end_unix int64) map[int64]int { | |||
| issueAssigneesList := make([]*IssueAssignees, 0) | |||
| sess.Select("issue_assignees.*").Table("issue_assignees"). | |||
| Join("inner", "issue", "issue.id=issue_assignees.issue_id"). | |||
| Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| Where(cond).OrderBy("issue_assignees.id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Find(&issueAssigneesList) | |||
| @@ -744,7 +735,7 @@ func queryPullRequest(start_unix int64, end_unix int64) map[int64]int { | |||
| indexTotal = 0 | |||
| for { | |||
| issueList := make([]*Issue, 0) | |||
| sess.Select("issue.*").Table("issue").Join("inner", "pull_request", "issue.id=pull_request.issue_id").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("issue.*").Table("issue").Join("inner", "pull_request", "issue.id=pull_request.issue_id").Where(cond).OrderBy("issue.id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Find(&issueList) | |||
| log.Info("query issue(PR) size=" + fmt.Sprint(len(issueList))) | |||
| for _, issueRecord := range issueList { | |||
| @@ -777,7 +768,7 @@ func queryCommitAction(start_unix int64, end_unix int64, actionType int64) map[i | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| actionList := make([]*Action, 0) | |||
| sess.Find(&actionList) | |||
| @@ -799,29 +790,30 @@ func queryCommitAction(start_unix int64, end_unix int64, actionType int64) map[i | |||
| return resultMap | |||
| } | |||
| func queryAction(start_unix int64, end_unix int64, actionType int64) map[int64]int { | |||
| func queryCreateIssue(start_unix int64, end_unix int64) map[int64]int { | |||
| sess := x.NewSession() | |||
| defer sess.Close() | |||
| resultMap := make(map[int64]int) | |||
| cond := "op_type=" + fmt.Sprint(actionType) + " and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||
| cond := "is_pull=false and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||
| count, err := sess.Where(cond).Count(new(Action)) | |||
| count, err := sess.Where(cond).Count(new(Issue)) | |||
| if err != nil { | |||
| log.Info("query Action error. return.") | |||
| log.Info("query Issue error. return.") | |||
| return resultMap | |||
| } | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| actionList := make([]*Action, 0) | |||
| sess.Find(&actionList) | |||
| log.Info("query action size=" + fmt.Sprint(len(actionList))) | |||
| for _, actionRecord := range actionList { | |||
| if _, ok := resultMap[actionRecord.UserID]; !ok { | |||
| resultMap[actionRecord.UserID] = 1 | |||
| sess.Select("id,poster_id").Table("issue").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| issueList := make([]*Issue, 0) | |||
| sess.Find(&issueList) | |||
| log.Info("query issue size=" + fmt.Sprint(len(issueList))) | |||
| for _, issueRecord := range issueList { | |||
| if _, ok := resultMap[issueRecord.PosterID]; !ok { | |||
| resultMap[issueRecord.PosterID] = 1 | |||
| } else { | |||
| resultMap[actionRecord.UserID] += 1 | |||
| resultMap[issueRecord.PosterID] += 1 | |||
| } | |||
| } | |||
| indexTotal += Page_SIZE | |||
| @@ -830,6 +822,7 @@ func queryAction(start_unix int64, end_unix int64, actionType int64) map[int64]i | |||
| } | |||
| } | |||
| return resultMap | |||
| } | |||
| func queryComment(start_unix int64, end_unix int64) map[int64]int { | |||
| @@ -846,7 +839,7 @@ func queryComment(start_unix int64, end_unix int64) map[int64]int { | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,type,poster_id").Table("comment").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,type,poster_id").Table("comment").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| commentList := make([]*Comment, 0) | |||
| sess.Find(&commentList) | |||
| log.Info("query Comment size=" + fmt.Sprint(len(commentList))) | |||
| @@ -882,7 +875,7 @@ func queryWatch(start_unix int64, end_unix int64) map[int64]int { | |||
| indexTotal = 0 | |||
| for { | |||
| watchList := make([]*Watch, 0) | |||
| sess.Select("id,user_id,repo_id").Table("watch").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,user_id,repo_id").Table("watch").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Find(&watchList) | |||
| log.Info("query Watch size=" + fmt.Sprint(len(watchList))) | |||
| @@ -920,7 +913,7 @@ func queryStar(start_unix int64, end_unix int64) map[int64]int { | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,uid,repo_id").Table("star").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,uid,repo_id").Table("star").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| starList := make([]*Star, 0) | |||
| sess.Find(&starList) | |||
| @@ -956,7 +949,7 @@ func queryFollow(start_unix int64, end_unix int64) map[int64]int { | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,user_id,follow_id").Table("follow").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,user_id,follow_id").Table("follow").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| followList := make([]*Follow, 0) | |||
| sess.Find(&followList) | |||
| @@ -992,7 +985,7 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,uploader_id,size").Table("attachment").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,uploader_id,size").Table("attachment").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| attachmentList := make([]*Attachment, 0) | |||
| sess.Find(&attachmentList) | |||
| @@ -1028,7 +1021,7 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| sess.Select("id,owner_id,name").Table("repository").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| sess.Select("id,owner_id,name").Table("repository").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| repoList := make([]*Repository, 0) | |||
| sess.Find(&repoList) | |||
| log.Info("query Repository size=" + fmt.Sprint(len(repoList))) | |||
| @@ -1099,8 +1092,7 @@ func queryUserRepoOpenIIndex(start_unix int64, end_unix int64) map[int64]float64 | |||
| } | |||
| } | |||
| userMapJson, _ := json.Marshal(userMap) | |||
| log.Info("userMapJson=" + string(userMapJson)) | |||
| log.Info("user openi index size=" + fmt.Sprint(len(userMap))) | |||
| return userMap | |||
| } | |||
| @@ -1119,7 +1111,7 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { | |||
| var indexTotal int64 | |||
| indexTotal = 0 | |||
| for { | |||
| statictisSess.Select("id,u_id").Table("user_login_log").Where(cond).Limit(Page_SIZE, int(indexTotal)) | |||
| statictisSess.Select("id,u_id").Table("user_login_log").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) | |||
| userLoginLogList := make([]*UserLoginLog, 0) | |||
| statictisSess.Find(&userLoginLogList) | |||
| log.Info("query user login size=" + fmt.Sprint(len(userLoginLogList))) | |||
| @@ -1135,7 +1127,7 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { | |||
| break | |||
| } | |||
| } | |||
| log.Info("user login size=" + fmt.Sprint(len(resultMap))) | |||
| return resultMap | |||
| } | |||
| @@ -70,3 +70,7 @@ type CreateTeamForm struct { | |||
| func (f *CreateTeamForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { | |||
| return validate(errs, ctx.Data, f, ctx.Locale) | |||
| } | |||
// SubmitReposOfTagForm is the form payload for assigning an official tag to
// a set of repositories; RepoList holds the selected repository IDs
// (consumed by UpdateTagReposByID).
type SubmitReposOfTagForm struct {
	RepoList []int64
}
| @@ -1,6 +1,8 @@ | |||
| package cloudbrain | |||
| import ( | |||
| "code.gitea.io/gitea/modules/storage" | |||
| "encoding/json" | |||
| "errors" | |||
| "strconv" | |||
| @@ -107,6 +109,9 @@ func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath, | |||
| uuid | |||
| var resourceSpec *models.ResourceSpec | |||
| if ResourceSpecs == nil { | |||
| json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) | |||
| } | |||
| for _, spec := range ResourceSpecs.ResourceSpec { | |||
| if resourceSpecId == spec.Id { | |||
| resourceSpec = spec | |||
| @@ -185,28 +190,143 @@ func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath, | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| log.Error("CreateJob failed:", err.Error()) | |||
| log.Error("CreateJob failed:", err.Error(), ctx.Data["MsgID"]) | |||
| return err | |||
| } | |||
| if jobResult.Code != Success { | |||
| log.Error("CreateJob(%s) failed:%s", jobName, jobResult.Msg) | |||
| log.Error("CreateJob(%s) failed:%s", jobName, jobResult.Msg, ctx.Data["MsgID"]) | |||
| return errors.New(jobResult.Msg) | |||
| } | |||
| var jobID = jobResult.Payload["jobId"].(string) | |||
| err = models.CreateCloudbrain(&models.Cloudbrain{ | |||
| Status: string(models.JobWaiting), | |||
| UserID: ctx.User.ID, | |||
| RepoID: ctx.Repo.Repository.ID, | |||
| JobID: jobID, | |||
| JobName: jobName, | |||
| SubTaskName: SubTaskName, | |||
| JobType: jobType, | |||
| Type: models.TypeCloudBrainOne, | |||
| Uuid: uuid, | |||
| Status: string(models.JobWaiting), | |||
| UserID: ctx.User.ID, | |||
| RepoID: ctx.Repo.Repository.ID, | |||
| JobID: jobID, | |||
| JobName: jobName, | |||
| SubTaskName: SubTaskName, | |||
| JobType: jobType, | |||
| Type: models.TypeCloudBrainOne, | |||
| Uuid: uuid, | |||
| Image: image, | |||
| GpuQueue: gpuQueue, | |||
| ResourceSpecId: resourceSpecId, | |||
| ComputeResource: models.GPUResource, | |||
| }) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func RestartTask(ctx *context.Context, task *models.Cloudbrain) error { | |||
| dataActualPath := setting.Attachment.Minio.RealPath + | |||
| setting.Attachment.Minio.Bucket + "/" + | |||
| setting.Attachment.Minio.BasePath + | |||
| models.AttachmentRelativePath(task.Uuid) + | |||
| task.Uuid | |||
| jobName := task.JobName | |||
| var resourceSpec *models.ResourceSpec | |||
| if ResourceSpecs == nil { | |||
| json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) | |||
| } | |||
| for _, spec := range ResourceSpecs.ResourceSpec { | |||
| if task.ResourceSpecId == spec.Id { | |||
| resourceSpec = spec | |||
| } | |||
| } | |||
| if resourceSpec == nil { | |||
| log.Error("no such resourceSpecId(%d)", task.ResourceSpecId, ctx.Data["MsgID"]) | |||
| return errors.New("no such resourceSpec") | |||
| } | |||
| jobResult, err := CreateJob(jobName, models.CreateJobParams{ | |||
| JobName: jobName, | |||
| RetryCount: 1, | |||
| GpuType: task.GpuQueue, | |||
| Image: task.Image, | |||
| TaskRoles: []models.TaskRole{ | |||
| { | |||
| Name: SubTaskName, | |||
| TaskNumber: 1, | |||
| MinSucceededTaskCount: 1, | |||
| MinFailedTaskCount: 1, | |||
| CPUNumber: resourceSpec.CpuNum, | |||
| GPUNumber: resourceSpec.GpuNum, | |||
| MemoryMB: resourceSpec.MemMiB, | |||
| ShmMB: resourceSpec.ShareMemMiB, | |||
| Command: Command, | |||
| NeedIBDevice: false, | |||
| IsMainRole: false, | |||
| UseNNI: false, | |||
| }, | |||
| }, | |||
| Volumes: []models.Volume{ | |||
| { | |||
| HostPath: models.StHostPath{ | |||
| Path: storage.GetMinioPath(jobName, CodeMountPath + "/"), | |||
| MountPath: CodeMountPath, | |||
| ReadOnly: false, | |||
| }, | |||
| }, | |||
| { | |||
| HostPath: models.StHostPath{ | |||
| Path: dataActualPath, | |||
| MountPath: DataSetMountPath, | |||
| ReadOnly: true, | |||
| }, | |||
| }, | |||
| { | |||
| HostPath: models.StHostPath{ | |||
| Path: storage.GetMinioPath(jobName, ModelMountPath + "/"), | |||
| MountPath: ModelMountPath, | |||
| ReadOnly: false, | |||
| }, | |||
| }, | |||
| { | |||
| HostPath: models.StHostPath{ | |||
| Path: storage.GetMinioPath(jobName, BenchMarkMountPath + "/"), | |||
| MountPath: BenchMarkMountPath, | |||
| ReadOnly: true, | |||
| }, | |||
| }, | |||
| { | |||
| HostPath: models.StHostPath{ | |||
| Path: storage.GetMinioPath(jobName, Snn4imagenetMountPath + "/"), | |||
| MountPath: Snn4imagenetMountPath, | |||
| ReadOnly: true, | |||
| }, | |||
| }, | |||
| { | |||
| HostPath: models.StHostPath{ | |||
| Path: storage.GetMinioPath(jobName, BrainScoreMountPath + "/"), | |||
| MountPath: BrainScoreMountPath, | |||
| ReadOnly: true, | |||
| }, | |||
| }, | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| log.Error("CreateJob failed:", err.Error(), ctx.Data["MsgID"]) | |||
| return err | |||
| } | |||
| if jobResult.Code != Success { | |||
| log.Error("CreateJob(%s) failed:%s", jobName, jobResult.Msg, ctx.Data["MsgID"]) | |||
| return errors.New(jobResult.Msg) | |||
| } | |||
| var jobID = jobResult.Payload["jobId"].(string) | |||
| task.JobID = jobID | |||
| task.Status = string(models.JobWaiting) | |||
| err = models.UpdateJob(task) | |||
| if err != nil { | |||
| log.Error("UpdateJob(%s) failed:%v", jobName, err.Error(), ctx.Data["MsgID"]) | |||
| return err | |||
| } | |||
| @@ -445,8 +445,12 @@ type Contributor struct { | |||
| Email string | |||
| } | |||
| func GetContributors(repoPath string) ([]Contributor, error){ | |||
| cmd := NewCommand("shortlog", "-sne", "--all") | |||
| func GetContributors(repoPath string, branchOrTag ...string) ([]Contributor, error) { | |||
| targetBranchOrTag := "HEAD" | |||
| if len(branchOrTag) > 0 && branchOrTag[0] != "" { | |||
| targetBranchOrTag = branchOrTag[0] | |||
| } | |||
| cmd := NewCommand("shortlog", "-sne", targetBranchOrTag) | |||
| stdout, err := cmd.RunInDir(repoPath) | |||
| if err != nil { | |||
| return nil, err | |||
| @@ -462,9 +466,9 @@ func GetContributors(repoPath string) ([]Contributor, error){ | |||
| } | |||
| number := oneCount[0:strings.Index(oneCount, "\t")] | |||
| commitCnt, _ := strconv.Atoi(number) | |||
| committer := oneCount[strings.Index(oneCount, "\t")+1:strings.LastIndex(oneCount, " ")] | |||
| committer := oneCount[strings.Index(oneCount, "\t")+1 : strings.LastIndex(oneCount, " ")] | |||
| committer = strings.Trim(committer, " ") | |||
| email := oneCount[strings.Index(oneCount, "<")+1:strings.Index(oneCount, ">")] | |||
| email := oneCount[strings.Index(oneCount, "<")+1 : strings.Index(oneCount, ">")] | |||
| contributorsInfo[i] = Contributor{ | |||
| commitCnt, committer, email, | |||
| } | |||
| @@ -48,12 +48,8 @@ const ( | |||
| PerPage = 10 | |||
| IsLatestVersion = "1" | |||
| NotLatestVersion = "0" | |||
| // ComputeResource = "NPU" | |||
| NPUResource = "NPU" | |||
| GPUResource = "CPU/GPU" | |||
| AllResource = "all" | |||
| DebugType = -1 | |||
| VersionCount = 1 | |||
| DebugType = -1 | |||
| VersionCount = 1 | |||
| SortByCreateTime = "create_time" | |||
| ConfigTypeCustom = "custom" | |||
| @@ -215,14 +211,15 @@ func GenerateTask(ctx *context.Context, jobName, uuid, description, flavor strin | |||
| } | |||
| err = models.CreateCloudbrain(&models.Cloudbrain{ | |||
| Status: string(models.JobWaiting), | |||
| UserID: ctx.User.ID, | |||
| RepoID: ctx.Repo.Repository.ID, | |||
| JobID: jobResult.ID, | |||
| JobName: jobName, | |||
| JobType: string(models.JobTypeDebug), | |||
| Type: models.TypeCloudBrainTwo, | |||
| Uuid: uuid, | |||
| Status: string(models.JobWaiting), | |||
| UserID: ctx.User.ID, | |||
| RepoID: ctx.Repo.Repository.ID, | |||
| JobID: jobResult.ID, | |||
| JobName: jobName, | |||
| JobType: string(models.JobTypeDebug), | |||
| Type: models.TypeCloudBrainTwo, | |||
| Uuid: uuid, | |||
| ComputeResource: models.NPUResource, | |||
| }) | |||
| if err != nil { | |||
| @@ -277,7 +274,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error | |||
| DatasetName: attach.Name, | |||
| CommitID: req.CommitID, | |||
| IsLatestVersion: req.IsLatestVersion, | |||
| ComputeResource: NPUResource, | |||
| ComputeResource: models.NPUResource, | |||
| EngineID: req.EngineID, | |||
| TrainUrl: req.TrainUrl, | |||
| BranchName: req.BranchName, | |||
| @@ -360,7 +357,7 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job | |||
| CommitID: req.CommitID, | |||
| IsLatestVersion: req.IsLatestVersion, | |||
| PreVersionName: req.PreVersionName, | |||
| ComputeResource: NPUResource, | |||
| ComputeResource: models.GPUResource, | |||
| EngineID: req.EngineID, | |||
| TrainUrl: req.TrainUrl, | |||
| BranchName: req.BranchName, | |||
| @@ -174,7 +174,7 @@ sendjob: | |||
| return &result, nil | |||
| } | |||
| func StopJob(jobID string, param models.NotebookAction) (*models.NotebookActionResult, error) { | |||
| func ManageNotebook(jobID string, param models.NotebookAction) (*models.NotebookActionResult, error) { | |||
| checkSetting() | |||
| client := getRestyClient() | |||
| var result models.NotebookActionResult | |||
| @@ -207,8 +207,8 @@ sendjob: | |||
| } | |||
| if len(response.ErrorCode) != 0 { | |||
| log.Error("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) | |||
| return &result, fmt.Errorf("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) | |||
| log.Error("ManageNotebook failed(%s): %s", response.ErrorCode, response.ErrorMsg) | |||
| return &result, fmt.Errorf("ManageNotebook failed(%s): %s", response.ErrorCode, response.ErrorMsg) | |||
| } | |||
| return &result, nil | |||
| @@ -21,7 +21,8 @@ type Fields struct { | |||
| Format string `json:"format"` | |||
| } | |||
| type MatchPhrase struct { | |||
| Message string `json:"message"` | |||
| Message string `json:"message,omitempty"` | |||
| TagName string `json:"tagName.keyword,omitempty"` | |||
| } | |||
| type Should struct { | |||
| MatchPhrase MatchPhrase `json:"match_phrase"` | |||
| @@ -144,7 +145,7 @@ func ProjectViewInit(User string, Project string, Gte string, Lte string) (proje | |||
| inputStruct.Batch[0].Request.Params.Body.Fields = make([]Fields, 1) | |||
| inputStruct.Batch[0].Request.Params.Body.Fields[0].Field = setting.TimeField | |||
| inputStruct.Batch[0].Request.Params.Body.Fields[0].Format = setting.ElkTimeFormat | |||
| inputStruct.Batch[0].Request.Params.Body.Query.BoolIn.Filter = make([]Filter, 3) | |||
| inputStruct.Batch[0].Request.Params.Body.Query.BoolIn.Filter = make([]Filter, 4) | |||
| //限定查询时间 | |||
| var timeRange Range | |||
| timeRange.Timestamptest.Gte = Gte | |||
| @@ -159,6 +160,24 @@ func ProjectViewInit(User string, Project string, Gte string, Lte string) (proje | |||
| var projectName FilterMatchPhrase | |||
| projectName.ProjectName = Project | |||
| inputStruct.Batch[0].Request.Params.Body.Query.BoolIn.Filter[2].FilterMatchPhrase = &projectName | |||
| //限定页面 | |||
| var bool Bool | |||
| bool.Should = make([]Should, 14) | |||
| bool.Should[0].MatchPhrase.TagName = "%{[request][3]}" | |||
| bool.Should[1].MatchPhrase.TagName = "datasets?type=0" | |||
| bool.Should[2].MatchPhrase.TagName = "datasets?type=1" | |||
| bool.Should[3].MatchPhrase.TagName = "issues" | |||
| bool.Should[4].MatchPhrase.TagName = "labels" | |||
| bool.Should[5].MatchPhrase.TagName = "pulls" | |||
| bool.Should[6].MatchPhrase.TagName = "wiki" | |||
| bool.Should[7].MatchPhrase.TagName = "activity" | |||
| bool.Should[8].MatchPhrase.TagName = "cloudbrain" | |||
| bool.Should[9].MatchPhrase.TagName = "modelarts" | |||
| bool.Should[10].MatchPhrase.TagName = "blockchain" | |||
| bool.Should[11].MatchPhrase.TagName = "watchers" | |||
| bool.Should[12].MatchPhrase.TagName = "stars" | |||
| bool.Should[13].MatchPhrase.TagName = "forks" | |||
| inputStruct.Batch[0].Request.Params.Body.Query.BoolIn.Filter[3].Bool = &bool | |||
| return inputStruct | |||
| } | |||
| @@ -80,3 +80,7 @@ func (l *LocalStorage) HasObject(path string) (bool, error) { | |||
| func (l *LocalStorage) UploadObject(fileName, filePath string) error { | |||
| return nil | |||
| } | |||
| func (l *LocalStorage) DeleteDir(dir string) error { | |||
| return nil | |||
| } | |||
| @@ -11,6 +11,9 @@ import ( | |||
| "strings" | |||
| "time" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "code.gitea.io/gitea/modules/setting" | |||
| "github.com/minio/minio-go" | |||
| ) | |||
| @@ -76,6 +79,29 @@ func (m *MinioStorage) Delete(path string) error { | |||
| return m.client.RemoveObject(m.bucket, m.buildMinioPath(path)) | |||
| } | |||
| // Delete delete a file | |||
| func (m *MinioStorage) DeleteDir(dir string) error { | |||
| objectsCh := make(chan string) | |||
| // Send object names that are needed to be removed to objectsCh | |||
| go func() { | |||
| defer close(objectsCh) | |||
| // List all objects from a bucket-name with a matching prefix. | |||
| for object := range m.client.ListObjects(m.bucket, dir, true, nil) { | |||
| if object.Err != nil { | |||
| log.Error("ListObjects failed:%v", object.Err) | |||
| } | |||
| objectsCh <- object.Key | |||
| } | |||
| }() | |||
| for rErr := range m.client.RemoveObjects(m.bucket, objectsCh) { | |||
| log.Error("Error detected during deletion: ", rErr) | |||
| } | |||
| return nil | |||
| } | |||
| //Get Presigned URL for get object | |||
| func (m *MinioStorage) PresignedGetURL(path string, fileName string) (string, error) { | |||
| // Set request parameters for content-disposition. | |||
| @@ -128,3 +154,7 @@ func (m *MinioStorage) UploadObject(fileName, filePath string) error { | |||
| _, err := m.client.FPutObject(m.bucket, fileName, filePath, minio.PutObjectOptions{}) | |||
| return err | |||
| } | |||
| func GetMinioPath(jobName, suffixPath string) string { | |||
| return setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.CBCodePathPrefix + jobName + suffixPath | |||
| } | |||
| @@ -23,6 +23,7 @@ type ObjectStorage interface { | |||
| Save(path string, r io.Reader) (int64, error) | |||
| Open(path string) (io.ReadCloser, error) | |||
| Delete(path string) error | |||
| DeleteDir(dir string) error | |||
| PresignedGetURL(path string, fileName string) (string, error) | |||
| PresignedPutURL(path string) (string, error) | |||
| HasObject(path string) (bool, error) | |||
| @@ -222,6 +222,7 @@ contributors = Contributors | |||
| [explore] | |||
| repos = Repositories | |||
| select_repos = Select the project | |||
| users = Users | |||
| organizations = Organizations | |||
| images = CloudImages | |||
| @@ -234,6 +235,8 @@ org_no_results = No matching organizations found. | |||
| code_no_results = No source code matching your search term found. | |||
| code_search_results = Search results for '%s' | |||
| code_last_indexed_at = Last indexed %s | |||
| save=save | |||
| cancel=cancel | |||
| [auth] | |||
| create_new_account = Register Account | |||
| @@ -778,6 +781,10 @@ datasets = Datasets | |||
| datasets.desc = Enable Dataset | |||
| cloudbrain_helper=Use GPU/NPU resources to open notebooks, model training tasks, etc. | |||
| model_manager = Model | |||
| model_noright=No right | |||
| model_rename=Duplicate model name, please modify model name. | |||
| debug=Debug | |||
| stop=Stop | |||
| delete=Delete | |||
| @@ -896,6 +903,8 @@ model.manage.F1 = F1 | |||
| model.manage.Precision = Precision | |||
| model.manage.Recall = Recall | |||
| model.manage.sava_model = Sava Model | |||
| model.manage.model_manage = ModelManage | |||
| model.manage.model_accuracy = Model Accuracy | |||
| template.items = Template Items | |||
| template.git_content = Git Content (Default Branch) | |||
| @@ -1127,6 +1136,7 @@ issues.filter_label_exclude = `Use <code>alt</code> + <code>click/enter</code> t | |||
| issues.filter_label_no_select = All labels | |||
| issues.filter_milestone = Milestone | |||
| issues.filter_milestone_no_select = All milestones | |||
| issues.filter_milestone_no_add = Not add milestones | |||
| issues.filter_assignee = Assignee | |||
| issues.filter_assginee_no_select = All assignees | |||
| issues.filter_type = Type | |||
| @@ -224,6 +224,7 @@ contributors=贡献者 | |||
| [explore] | |||
| repos=项目 | |||
| select_repos=精选项目 | |||
| users=用户 | |||
| organizations=组织 | |||
| images = 云脑镜像 | |||
| @@ -238,6 +239,8 @@ org_no_results=未找到匹配的组织。 | |||
| code_no_results=未找到与搜索字词匹配的源代码。 | |||
| code_search_results=“%s” 的搜索结果是 | |||
| code_last_indexed_at=最后索引于 %s | |||
| save=保存 | |||
| cancel=取消 | |||
| [auth] | |||
| create_new_account=注册帐号 | |||
| @@ -784,8 +787,10 @@ cloudbrain_helper=使用GPU/NPU资源,开启Notebook、模型训练任务等 | |||
| model_manager = 模型 | |||
| model_noright=无权限操作 | |||
| model_rename=模型名称重复,请修改模型名称 | |||
| debug=调试 | |||
| debug_again=再次调试 | |||
| stop=停止 | |||
| delete=删除 | |||
| model_download=模型下载 | |||
| @@ -908,6 +913,8 @@ model.manage.F1 = F1值 | |||
| model.manage.Precision = 精确率 | |||
| model.manage.Recall = 召回率 | |||
| model.manage.sava_model = 保存模型 | |||
| model.manage.model_manage = 模型管理 | |||
| model.manage.model_accuracy = 模型精度 | |||
| template.items=模板选项 | |||
| template.git_content=Git数据(默认分支) | |||
| @@ -1140,6 +1147,7 @@ issues.filter_label_exclude=`使用 <code>alt</code> + <code>鼠标左键 / 回 | |||
| issues.filter_label_no_select=所有标签 | |||
| issues.filter_milestone=里程碑筛选 | |||
| issues.filter_milestone_no_select=所有里程碑 | |||
| issues.filter_milestone_no_add=未添加里程碑 | |||
| issues.filter_assignee=指派人筛选 | |||
| issues.filter_assginee_no_select=所有指派成员 | |||
| issues.filter_type=类型筛选 | |||
| @@ -11655,6 +11655,11 @@ | |||
| "autolinker": "~0.28.0" | |||
| } | |||
| }, | |||
| "remixicon": { | |||
| "version": "2.5.0", | |||
| "resolved": "https://registry.npmjs.org/remixicon/-/remixicon-2.5.0.tgz", | |||
| "integrity": "sha512-q54ra2QutYDZpuSnFjmeagmEiN9IMo56/zz5dDNitzKD23oFRw77cWo4TsrAdmdkPiEn8mxlrTqxnkujDbEGww==" | |||
| }, | |||
| "remove-bom-buffer": { | |||
| "version": "3.0.0", | |||
| "resolved": "https://registry.npmjs.org/remove-bom-buffer/-/remove-bom-buffer-3.0.0.tgz", | |||
| @@ -42,6 +42,7 @@ | |||
| "postcss-preset-env": "6.7.0", | |||
| "postcss-safe-parser": "4.0.2", | |||
| "qs": "6.9.4", | |||
| "remixicon": "2.5.0", | |||
| "spark-md5": "3.0.1", | |||
| "svg-sprite-loader": "5.0.0", | |||
| "svgo": "1.3.2", | |||
| @@ -0,0 +1,53 @@ | |||
| package routers | |||
| import ( | |||
| "code.gitea.io/gitea/models" | |||
| "code.gitea.io/gitea/modules/context" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "code.gitea.io/gitea/services/socketwrap" | |||
| "github.com/gorilla/websocket" | |||
| ) | |||
| var upgrader = websocket.Upgrader{ | |||
| ReadBufferSize: 1024, | |||
| WriteBufferSize: 1024, | |||
| } | |||
| var SocketManager = socketwrap.NewClientsManager() | |||
| func ActionNotification(ctx *context.Context) { | |||
| conn, err := upgrader.Upgrade(ctx.Resp, ctx.Req.Request, nil) | |||
| if err != nil { | |||
| log.Warn("can not create connection.", err) | |||
| return | |||
| } | |||
| client := &socketwrap.Client{Manager: SocketManager, Conn: conn, Send: make(chan *models.Action, 256)} | |||
| WriteLastTenActionsIfHave(conn) | |||
| client.Manager.Register <- client | |||
| go client.WritePump() | |||
| } | |||
| func WriteLastTenActionsIfHave(conn *websocket.Conn) { | |||
| socketwrap.LastTenActionsQueue.Mutex.RLock() | |||
| { | |||
| size := socketwrap.LastTenActionsQueue.Queue.Len() | |||
| if size > 0 { | |||
| tempE := socketwrap.LastTenActionsQueue.Queue.Front() | |||
| conn.WriteJSON(tempE) | |||
| for i := 1; i < size; i++ { | |||
| tempE = tempE.Next() | |||
| conn.WriteJSON(tempE) | |||
| } | |||
| } | |||
| } | |||
| socketwrap.LastTenActionsQueue.Mutex.RUnlock() | |||
| } | |||
| @@ -15,6 +15,7 @@ import ( | |||
| "code.gitea.io/gitea/modules/log" | |||
| "code.gitea.io/gitea/modules/modelarts" | |||
| "code.gitea.io/gitea/modules/storage" | |||
| routerRepo "code.gitea.io/gitea/routers/repo" | |||
| ) | |||
| func GetModelArtsNotebook(ctx *context.APIContext) { | |||
| @@ -237,7 +238,7 @@ func DelTrainJobVersion(ctx *context.APIContext) { | |||
| JobID: jobID, | |||
| }) | |||
| if err != nil { | |||
| ctx.ServerError("get VersionListCount faild", err) | |||
| ctx.ServerError("get VersionListCount failed", err) | |||
| return | |||
| } | |||
| if VersionListCount > 0 { | |||
| @@ -255,6 +256,8 @@ func DelTrainJobVersion(ctx *context.APIContext) { | |||
| return | |||
| } | |||
| } | |||
| } else { //已删除该任务下的所有版本 | |||
| routerRepo.DeleteJobStorage(task.JobName) | |||
| } | |||
| ctx.JSON(http.StatusOK, map[string]interface{}{ | |||
| @@ -130,5 +130,13 @@ func Home(ctx *context.Context) { | |||
| pager.SetDefaultParams(ctx) | |||
| ctx.Data["Page"] = pager | |||
| //find org tag info | |||
| tags, err := models.GetAllOfficialTagRepos(org.ID, ctx.Org.IsOwner) | |||
| if err != nil { | |||
| ctx.ServerError("GetAllOfficialTagRepos", err) | |||
| return | |||
| } | |||
| ctx.Data["tags"] = tags | |||
| ctx.HTML(200, tplOrgHome) | |||
| } | |||
| @@ -0,0 +1,90 @@ | |||
| // Copyright 2014 The Gogs Authors. All rights reserved. | |||
| // Copyright 2020 The Gitea Authors. | |||
| // Use of this source code is governed by a MIT-style | |||
| // license that can be found in the LICENSE file. | |||
| package org | |||
| import ( | |||
| "code.gitea.io/gitea/models" | |||
| "code.gitea.io/gitea/modules/auth" | |||
| "code.gitea.io/gitea/modules/context" | |||
| "errors" | |||
| "strconv" | |||
| ) | |||
| const DefaultOrgTagLimit = -1 | |||
| // SubmitTags submit repos of org tag | |||
| func SubmitTags(ctx *context.Context, form auth.SubmitReposOfTagForm) { | |||
| if !ctx.Org.IsOwner { | |||
| ctx.ServerError("UpdateTagReposByID", errors.New("no access to submit tags")) | |||
| return | |||
| } | |||
| tag := getTagFromContext(ctx) | |||
| if ctx.Written() { | |||
| return | |||
| } | |||
| if tag.Limit != DefaultOrgTagLimit && len(form.RepoList) > tag.Limit { | |||
| ctx.ServerError("UpdateTagReposByID", errors.New("tags size over limit")) | |||
| return | |||
| } | |||
| err := models.UpdateTagReposByID(tag.ID, ctx.Org.Organization.ID, form.RepoList) | |||
| if err != nil { | |||
| ctx.ServerError("UpdateTagReposByID", err) | |||
| return | |||
| } | |||
| ctx.JSON(200, map[string]interface{}{ | |||
| "code": "00", | |||
| "msg": "success", | |||
| }) | |||
| } | |||
| // GetTagRepos get repos under org tag | |||
| func GetTagRepos(ctx *context.Context) { | |||
| if !ctx.Org.IsOwner { | |||
| ctx.ServerError("GetTagRepos", errors.New("no access to get tags")) | |||
| return | |||
| } | |||
| tag := getTagFromContext(ctx) | |||
| if ctx.Written() { | |||
| return | |||
| } | |||
| r, err := models.GetTagRepos(tag.ID, ctx.Org.Organization.ID) | |||
| if err != nil { | |||
| ctx.ServerError("GetTagRepos", err) | |||
| return | |||
| } | |||
| ctx.JSON(200, map[string]interface{}{ | |||
| "code": "00", | |||
| "msg": "success", | |||
| "data": r, | |||
| }) | |||
| } | |||
| // getTagFromContext finds out tag info From context. | |||
| func getTagFromContext(ctx *context.Context) *models.OfficialTag { | |||
| var tag *models.OfficialTag | |||
| var err error | |||
| tagIdStr := ctx.Query("tagId") | |||
| if len(tagIdStr) == 0 { | |||
| ctx.ServerError("GetTagInfo", errors.New("tag is not exist")) | |||
| return nil | |||
| } | |||
| tagId, _ := strconv.ParseInt(tagIdStr, 10, 32) | |||
| tag, err = models.GetTagByID(tagId) | |||
| if err != nil { | |||
| if models.IsErrTagNotExist(err) { | |||
| ctx.NotFound("GetTagInfo", err) | |||
| } else { | |||
| ctx.ServerError("GetTagInfo", err) | |||
| } | |||
| return nil | |||
| } | |||
| return tag | |||
| } | |||
| @@ -105,6 +105,23 @@ func saveModelByParameters(jobId string, versionName string, name string, versio | |||
| return nil | |||
| } | |||
| func SaveNewNameModel(ctx *context.Context) { | |||
| name := ctx.Query("Name") | |||
| if name == "" { | |||
| ctx.Error(500, fmt.Sprintf("name or version is null.")) | |||
| return | |||
| } | |||
| aimodels := models.QueryModelByName(name, ctx.Repo.Repository.ID) | |||
| if len(aimodels) > 0 { | |||
| ctx.Error(500, ctx.Tr("repo.model_rename")) | |||
| return | |||
| } | |||
| SaveModel(ctx) | |||
| log.Info("save model end.") | |||
| } | |||
| func SaveModel(ctx *context.Context) { | |||
| log.Info("save model start.") | |||
| JobId := ctx.Query("JobId") | |||
| @@ -72,7 +72,7 @@ func HandleBlockChainInitNotify(ctx *context.Context) { | |||
| repo.BlockChainStatus = models.RepoBlockChainSuccess | |||
| repo.ContractAddress = req.ContractAddress | |||
| if err = models.UpdateRepositoryCols(repo, "block_chain_status", "contract_address"); err != nil { | |||
| if err = repo.UpdateBlockChain(); err != nil { | |||
| log.Error("UpdateRepositoryCols failed:%v", err.Error(), ctx.Data["msgID"]) | |||
| ctx.JSON(200, map[string]string{ | |||
| "code": "-1", | |||
| @@ -206,7 +206,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||
| } | |||
| repo := ctx.Repo.Repository | |||
| downloadCode(repo, codePath) | |||
| uploadCodeToMinio(codePath+"/", jobName, "/code/") | |||
| uploadCodeToMinio(codePath+"/", jobName, cloudbrain.CodeMountPath+"/") | |||
| modelPath := setting.JobPath + jobName + cloudbrain.ModelMountPath + "/" | |||
| mkModelPath(modelPath) | |||
| @@ -236,15 +236,89 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||
| uploadCodeToMinio(brainScorePath+"/", jobName, cloudbrain.BrainScoreMountPath+"/") | |||
| } | |||
| err = cloudbrain.GenerateTask(ctx, jobName, image, command, uuid, codePath, getMinioPath(jobName, cloudbrain.ModelMountPath+"/"), | |||
| getMinioPath(jobName, cloudbrain.BenchMarkMountPath+"/"), getMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | |||
| getMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), jobType, gpuQueue, resourceSpecId) | |||
| err = cloudbrain.GenerateTask(ctx, jobName, image, command, uuid, storage.GetMinioPath(jobName, cloudbrain.CodeMountPath+"/"), | |||
| storage.GetMinioPath(jobName, cloudbrain.ModelMountPath+"/"), | |||
| storage.GetMinioPath(jobName, cloudbrain.BenchMarkMountPath+"/"), storage.GetMinioPath(jobName, cloudbrain.Snn4imagenetMountPath+"/"), | |||
| storage.GetMinioPath(jobName, cloudbrain.BrainScoreMountPath+"/"), jobType, gpuQueue, resourceSpecId) | |||
| if err != nil { | |||
| cloudBrainNewDataPrepare(ctx) | |||
| ctx.RenderWithErr(err.Error(), tplCloudBrainNew, &form) | |||
| return | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob") | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=all") | |||
| } | |||
| func CloudBrainRestart(ctx *context.Context) { | |||
| var jobID = ctx.Params(":jobid") | |||
| var resultCode = "0" | |||
| var errorMsg = "" | |||
| var status = "" | |||
| for { | |||
| task, err := models.GetCloudbrainByJobID(jobID) | |||
| if err != nil { | |||
| log.Error("GetCloudbrainByJobID(%s) failed:%v", jobID, err.Error(), ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| if task.Status != string(models.JobStopped) && task.Status != string(models.JobSucceeded) && task.Status != string(models.JobFailed) { | |||
| log.Error("the job(%s) is not stopped", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "the job is not stopped" | |||
| break | |||
| } | |||
| if task.Image == "" || task.GpuQueue == "" || task.Type != models.TypeCloudBrainOne { | |||
| log.Error("the job(%s) version is too old", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "the job's version is too old and can not be restarted" | |||
| break | |||
| } | |||
| if !ctx.IsSigned || (ctx.User.ID != task.UserID && !ctx.IsUserSiteAdmin()){ | |||
| log.Error("the user has no right ro restart the job", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "you have no right to restart the job" | |||
| break | |||
| } | |||
| count, err := models.GetCloudbrainCountByUserID(ctx.User.ID) | |||
| if err != nil { | |||
| log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } else { | |||
| if count >= 1 { | |||
| log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "you have already a running or waiting task, can not create more" | |||
| break | |||
| } | |||
| } | |||
| err = cloudbrain.RestartTask(ctx, task) | |||
| if err != nil { | |||
| log.Error("RestartTask failed:%v", err.Error(), ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| status = task.Status | |||
| jobID = task.JobID | |||
| break | |||
| } | |||
| ctx.JSON(200, map[string]string{ | |||
| "result_code": resultCode, | |||
| "error_msg": errorMsg, | |||
| "status": status, | |||
| "job_id": jobID, | |||
| }) | |||
| } | |||
| func CloudBrainShow(ctx *context.Context) { | |||
| @@ -351,32 +425,53 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain | |||
| func CloudBrainStop(ctx *context.Context) { | |||
| var jobID = ctx.Params(":jobid") | |||
| task, err := models.GetCloudbrainByJobID(jobID) | |||
| if err != nil { | |||
| ctx.ServerError("GetCloudbrainByJobID failed", err) | |||
| return | |||
| } | |||
| var resultCode = "0" | |||
| var errorMsg = "" | |||
| var status = "" | |||
| if task.Status == string(models.JobStopped) || task.Status == string(models.JobFailed) { | |||
| log.Error("the job(%s) has been stopped", task.JobName, ctx.Data["msgID"]) | |||
| ctx.ServerError("the job has been stopped", errors.New("the job has been stopped")) | |||
| return | |||
| } | |||
| for { | |||
| task, err := models.GetCloudbrainByJobID(jobID) | |||
| if err != nil { | |||
| log.Error("GetCloudbrainByJobID(%s) failed:%v", task.JobName, err, ctx.Data["msgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| err = cloudbrain.StopJob(jobID) | |||
| if err != nil { | |||
| log.Error("StopJob(%s) failed:%v", task.JobName, err.Error(), ctx.Data["msgID"]) | |||
| ctx.ServerError("StopJob failed", err) | |||
| return | |||
| } | |||
| if task.Status == string(models.JobStopped) || task.Status == string(models.JobFailed) { | |||
| log.Error("the job(%s) has been stopped", task.JobName, ctx.Data["msgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| task.Status = string(models.JobStopped) | |||
| err = models.UpdateJob(task) | |||
| if err != nil { | |||
| ctx.ServerError("UpdateJob failed", err) | |||
| return | |||
| err = cloudbrain.StopJob(jobID) | |||
| if err != nil { | |||
| log.Error("StopJob(%s) failed:%v", task.JobName, err, ctx.Data["msgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| task.Status = string(models.JobStopped) | |||
| err = models.UpdateJob(task) | |||
| if err != nil { | |||
| log.Error("UpdateJob(%s) failed:%v", task.JobName, err, ctx.Data["msgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| status = task.Status | |||
| break | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob") | |||
| ctx.JSON(200, map[string]string{ | |||
| "result_code": resultCode, | |||
| "error_msg": errorMsg, | |||
| "status": status, | |||
| "job_id": jobID, | |||
| }) | |||
| } | |||
| func StopJobsByUserID(userID int64) { | |||
| @@ -423,7 +518,7 @@ func StopJobs(cloudBrains []*models.Cloudbrain) { | |||
| Action: models.ActionStop, | |||
| } | |||
| err := retry(3, time.Second*30, func() error { | |||
| _, err := modelarts.StopJob(taskInfo.JobID, param) | |||
| _, err := modelarts.ManageNotebook(taskInfo.JobID, param) | |||
| return err | |||
| }) | |||
| logErrorAndUpdateJobStatus(err, taskInfo) | |||
| @@ -478,7 +573,9 @@ func CloudBrainDel(ctx *context.Context) { | |||
| ctx.ServerError("DeleteJob failed", err) | |||
| return | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob") | |||
| deleteJobStorage(task.JobName, models.TypeCloudBrainOne) | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=all") | |||
| } | |||
| func CloudBrainShowModels(ctx *context.Context) { | |||
| @@ -560,7 +657,7 @@ func getImages(ctx *context.Context, imageType string) { | |||
| func GetModelDirs(jobName string, parentDir string) (string, error) { | |||
| var req string | |||
| modelActualPath := getMinioPath(jobName, cloudbrain.ModelMountPath+"/") | |||
| modelActualPath := storage.GetMinioPath(jobName, cloudbrain.ModelMountPath+"/") | |||
| if parentDir == "" { | |||
| req = "baseDir=" + modelActualPath | |||
| } else { | |||
| @@ -570,10 +667,6 @@ func GetModelDirs(jobName string, parentDir string) (string, error) { | |||
| return getDirs(req) | |||
| } | |||
| func getMinioPath(jobName, suffixPath string) string { | |||
| return setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.CBCodePathPrefix + jobName + suffixPath | |||
| } | |||
| func CloudBrainDownloadModel(ctx *context.Context) { | |||
| parentDir := ctx.Query("parentDir") | |||
| fileName := ctx.Query("fileName") | |||
| @@ -756,6 +849,35 @@ func mkModelPath(modelPath string) error { | |||
| return nil | |||
| } | |||
| func deleteJobStorage(jobName string, cloudbrainType int) error { | |||
| //delete local | |||
| localJobPath := setting.JobPath + jobName | |||
| err := os.RemoveAll(localJobPath) | |||
| if err != nil { | |||
| log.Error("RemoveAll(%s) failed:%v", localJobPath, err) | |||
| } | |||
| //delete oss | |||
| if cloudbrainType == models.TypeCloudBrainOne { | |||
| dirPath := setting.CBCodePathPrefix + jobName + "/" | |||
| err = storage.Attachments.DeleteDir(dirPath) | |||
| if err != nil { | |||
| log.Error("DeleteDir(%s) failed:%v", localJobPath, err) | |||
| } | |||
| } else if cloudbrainType == models.TypeCloudBrainTwo { | |||
| //dirPath := setting.CodePathPrefix + jobName + "/" | |||
| //err = storage.ObsRemoveObject(setting.Bucket, dirPath) | |||
| //if err != nil { | |||
| // log.Error("ObsRemoveObject(%s) failed:%v", localJobPath, err) | |||
| //} | |||
| log.Info("no need to delete") | |||
| } else { | |||
| log.Error("cloudbrainType(%d) error", cloudbrainType) | |||
| } | |||
| return nil | |||
| } | |||
| func SyncCloudbrainStatus() { | |||
| cloudBrains, err := models.GetCloudBrainUnStoppedJob() | |||
| if err != nil { | |||
| @@ -193,6 +193,8 @@ func issues(ctx *context.Context, milestoneID int64, isPullOption util.OptionalB | |||
| var mileIDs []int64 | |||
| if milestoneID > 0 { | |||
| mileIDs = []int64{milestoneID} | |||
| } else if milestoneID == -1 { //only search no milestone | |||
| mileIDs = []int64{0} | |||
| } | |||
| var issues []*models.Issue | |||
| @@ -355,7 +357,8 @@ func Issues(ctx *context.Context) { | |||
| var err error | |||
| // Get milestones. | |||
| ctx.Data["Milestones"], err = models.GetMilestonesByRepoID(ctx.Repo.Repository.ID, api.StateType(ctx.Query("state")), models.ListOptions{}) | |||
| ctx.Data["OpenMilestones"], err = models.GetMilestonesByRepoID(ctx.Repo.Repository.ID, api.StateOpen, models.ListOptions{}) | |||
| ctx.Data["ClosedMilestones"], err = models.GetMilestonesByRepoID(ctx.Repo.Repository.ID, api.StateClosed, models.ListOptions{}) | |||
| if err != nil { | |||
| ctx.ServerError("GetAllRepoMilestones", err) | |||
| return | |||
| @@ -268,6 +268,7 @@ func MilestoneIssuesAndPulls(ctx *context.Context) { | |||
| ctx.Data["CanWriteIssues"] = ctx.Repo.CanWriteIssuesOrPulls(false) | |||
| ctx.Data["CanWritePulls"] = ctx.Repo.CanWriteIssuesOrPulls(true) | |||
| ctx.Data["PageIsIssueList"] = true | |||
| ctx.HTML(200, tplMilestoneIssues) | |||
| } | |||
| @@ -11,11 +11,10 @@ import ( | |||
| "strings" | |||
| "time" | |||
| "code.gitea.io/gitea/modules/cloudbrain" | |||
| "code.gitea.io/gitea/models" | |||
| "code.gitea.io/gitea/modules/auth" | |||
| "code.gitea.io/gitea/modules/base" | |||
| "code.gitea.io/gitea/modules/cloudbrain" | |||
| "code.gitea.io/gitea/modules/context" | |||
| "code.gitea.io/gitea/modules/git" | |||
| "code.gitea.io/gitea/modules/log" | |||
| @@ -42,6 +41,7 @@ const ( | |||
| func DebugJobIndex(ctx *context.Context) { | |||
| debugListType := ctx.Query("debugListType") | |||
| ctx.Data["ListType"] = debugListType | |||
| MustEnableCloudbrain(ctx) | |||
| repo := ctx.Repo.Repository | |||
| page := ctx.QueryInt("page") | |||
| @@ -49,12 +49,9 @@ func DebugJobIndex(ctx *context.Context) { | |||
| page = 1 | |||
| } | |||
| debugType := modelarts.DebugType | |||
| jobType := string(models.JobTypeDebug) | |||
| if debugListType == modelarts.GPUResource { | |||
| if debugListType == models.GPUResource { | |||
| debugType = models.TypeCloudBrainOne | |||
| jobType = "" | |||
| } | |||
| if debugListType == modelarts.NPUResource { | |||
| } else if debugListType == models.NPUResource { | |||
| debugType = models.TypeCloudBrainTwo | |||
| } | |||
| @@ -63,9 +60,10 @@ func DebugJobIndex(ctx *context.Context) { | |||
| Page: page, | |||
| PageSize: setting.UI.IssuePagingNum, | |||
| }, | |||
| RepoID: repo.ID, | |||
| Type: debugType, | |||
| JobType: jobType, | |||
| RepoID: repo.ID, | |||
| Type: debugType, | |||
| JobTypeNot: true, | |||
| JobType: string(models.JobTypeTrain), | |||
| }) | |||
| if err != nil { | |||
| ctx.ServerError("Get debugjob faild:", err) | |||
| @@ -73,21 +71,13 @@ func DebugJobIndex(ctx *context.Context) { | |||
| } | |||
| for i, task := range ciTasks { | |||
| if task.Cloudbrain.Type == models.TypeCloudBrainOne { | |||
| ciTasks[i].CanDebug = cloudbrain.CanCreateOrDebugJob(ctx) | |||
| ciTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) | |||
| ciTasks[i].Cloudbrain.ComputeResource = modelarts.GPUResource | |||
| } | |||
| if task.Cloudbrain.Type == models.TypeCloudBrainTwo { | |||
| ciTasks[i].CanDebug = cloudbrain.CanCreateOrDebugJob(ctx) | |||
| ciTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) | |||
| ciTasks[i].Cloudbrain.ComputeResource = modelarts.NPUResource | |||
| } | |||
| ciTasks[i].CanDebug = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain) | |||
| ciTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) | |||
| ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource | |||
| } | |||
| pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5) | |||
| pager.SetDefaultParams(ctx) | |||
| pager.AddParam(ctx, "debugListType", "ListType") | |||
| ctx.Data["Page"] = pager | |||
| ctx.Data["PageIsCloudBrain"] = true | |||
| ctx.Data["Tasks"] = ciTasks | |||
| @@ -156,7 +146,7 @@ func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) | |||
| ctx.RenderWithErr(err.Error(), tplModelArtsNotebookNew, &form) | |||
| return | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob") | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=all") | |||
| } | |||
| func NotebookShow(ctx *context.Context) { | |||
| @@ -232,38 +222,105 @@ func NotebookDebug(ctx *context.Context) { | |||
| ctx.Redirect(debugUrl) | |||
| } | |||
| func NotebookStop(ctx *context.Context) { | |||
| func NotebookManage(ctx *context.Context) { | |||
| var jobID = ctx.Params(":jobid") | |||
| log.Info(jobID) | |||
| task, err := models.GetCloudbrainByJobID(jobID) | |||
| if err != nil { | |||
| ctx.ServerError("GetCloudbrainByJobID failed", err) | |||
| return | |||
| } | |||
| var action = ctx.Params(":action") | |||
| var resultCode = "0" | |||
| var errorMsg = "" | |||
| var status = "" | |||
| if task.Status != string(models.JobRunning) { | |||
| log.Error("the job(%s) is not running", task.JobName) | |||
| ctx.ServerError("the job is not running", errors.New("the job is not running")) | |||
| return | |||
| } | |||
| for { | |||
| task, err := models.GetCloudbrainByJobID(jobID) | |||
| if err != nil { | |||
| log.Error("GetCloudbrainByJobID failed:%v", err, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| param := models.NotebookAction{ | |||
| Action: models.ActionStop, | |||
| } | |||
| res, err := modelarts.StopJob(jobID, param) | |||
| if err != nil { | |||
| log.Error("StopJob(%s) failed:%v", task.JobName, err.Error()) | |||
| ctx.ServerError("StopJob failed", err) | |||
| return | |||
| } | |||
| if action == models.ActionStop { | |||
| if task.Status != string(models.ModelArtsRunning) { | |||
| log.Error("the job(%s) is not running", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "the job is not running" | |||
| break | |||
| } | |||
| task.Status = res.CurrentStatus | |||
| err = models.UpdateJob(task) | |||
| if err != nil { | |||
| ctx.ServerError("UpdateJob failed", err) | |||
| return | |||
| if !ctx.IsSigned || (ctx.User.ID != task.UserID && !ctx.IsUserSiteAdmin() && !ctx.IsUserRepoOwner()) { | |||
| log.Error("the user has no right ro stop the job", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "you have no right to stop the job" | |||
| break | |||
| } | |||
| } else if action == models.ActionRestart { | |||
| if task.Status != string(models.ModelArtsStopped) && task.Status != string(models.ModelArtsStartFailed) && task.Status != string(models.ModelArtsCreateFailed) { | |||
| log.Error("the job(%s) is not stopped", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "the job is not stopped" | |||
| break | |||
| } | |||
| if !ctx.IsSigned || (ctx.User.ID != task.UserID && !ctx.IsUserSiteAdmin()) { | |||
| log.Error("the user has no right ro restart the job", task.JobName, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "you have no right to restart the job" | |||
| break | |||
| } | |||
| count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID) | |||
| if err != nil { | |||
| log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } else { | |||
| if count >= 1 { | |||
| log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "you have already a running or waiting task, can not create more" | |||
| break | |||
| } | |||
| } | |||
| action = models.ActionStart | |||
| } else { | |||
| log.Error("the action(%s) is illegal", action, ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "非法操作" | |||
| break | |||
| } | |||
| param := models.NotebookAction{ | |||
| Action: action, | |||
| } | |||
| res, err := modelarts.ManageNotebook(jobID, param) | |||
| if err != nil { | |||
| log.Error("ManageNotebook(%s) failed:%v", task.JobName, err.Error(), ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "启动失败" | |||
| break | |||
| } | |||
| task.Status = res.CurrentStatus | |||
| err = models.UpdateJob(task) | |||
| if err != nil { | |||
| log.Error("UpdateJob(%s) failed:%v", task.JobName, err.Error(), ctx.Data["MsgID"]) | |||
| resultCode = "-1" | |||
| errorMsg = "system error" | |||
| break | |||
| } | |||
| status = task.Status | |||
| break | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob") | |||
| ctx.JSON(200, map[string]string{ | |||
| "result_code": resultCode, | |||
| "error_msg": errorMsg, | |||
| "status": status, | |||
| "job_id": jobID, | |||
| }) | |||
| } | |||
| func NotebookDel(ctx *context.Context) { | |||
| @@ -274,7 +331,7 @@ func NotebookDel(ctx *context.Context) { | |||
| return | |||
| } | |||
| if task.Status != string(models.JobStopped) { | |||
| if task.Status != string(models.ModelArtsCreateFailed) && task.Status != string(models.ModelArtsStartFailed) && task.Status != string(models.ModelArtsStopped) { | |||
| log.Error("the job(%s) has not been stopped", task.JobName) | |||
| ctx.ServerError("the job has not been stopped", errors.New("the job has not been stopped")) | |||
| return | |||
| @@ -293,7 +350,7 @@ func NotebookDel(ctx *context.Context) { | |||
| return | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob") | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=all") | |||
| } | |||
| func TrainJobIndex(ctx *context.Context) { | |||
| @@ -312,6 +369,7 @@ func TrainJobIndex(ctx *context.Context) { | |||
| }, | |||
| RepoID: repo.ID, | |||
| Type: models.TypeCloudBrainTwo, | |||
| JobTypeNot: false, | |||
| JobType: string(models.JobTypeTrain), | |||
| IsLatestVersion: modelarts.IsLatestVersion, | |||
| }) | |||
| @@ -323,6 +381,7 @@ func TrainJobIndex(ctx *context.Context) { | |||
| for i, task := range tasks { | |||
| tasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) | |||
| tasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain) | |||
| tasks[i].ComputeResource = models.NPUResource | |||
| } | |||
| pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5) | |||
| @@ -364,7 +423,7 @@ func trainJobNewDataPrepare(ctx *context.Context) error { | |||
| var jobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:] | |||
| ctx.Data["job_name"] = jobName | |||
| attachs, err := models.GetModelArtsUserAttachments(ctx.User.ID) | |||
| attachs, err := models.GetModelArtsTrainAttachments(ctx.User.ID) | |||
| if err != nil { | |||
| ctx.ServerError("GetAllUserAttachments failed:", err) | |||
| return err | |||
| @@ -433,7 +492,7 @@ func trainJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArts | |||
| var jobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:] | |||
| ctx.Data["job_name"] = jobName | |||
| attachs, err := models.GetModelArtsUserAttachments(ctx.User.ID) | |||
| attachs, err := models.GetModelArtsTrainAttachments(ctx.User.ID) | |||
| if err != nil { | |||
| ctx.ServerError("GetAllUserAttachments failed:", err) | |||
| return err | |||
| @@ -521,7 +580,7 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error { | |||
| var jobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:] | |||
| ctx.Data["job_name"] = task.JobName | |||
| attachs, err := models.GetModelArtsUserAttachments(ctx.User.ID) | |||
| attachs, err := models.GetModelArtsTrainAttachments(ctx.User.ID) | |||
| if err != nil { | |||
| ctx.ServerError("GetAllUserAttachments failed:", err) | |||
| return err | |||
| @@ -610,7 +669,7 @@ func versionErrorDataPrepare(ctx *context.Context, form auth.CreateModelArtsTrai | |||
| var jobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:] | |||
| ctx.Data["job_name"] = task.JobName | |||
| attachs, err := models.GetModelArtsUserAttachments(ctx.User.ID) | |||
| attachs, err := models.GetModelArtsTrainAttachments(ctx.User.ID) | |||
| if err != nil { | |||
| ctx.ServerError("GetAllUserAttachments failed:", err) | |||
| return err | |||
| @@ -1352,6 +1411,11 @@ func TrainJobDel(ctx *context.Context) { | |||
| } | |||
| } | |||
| //删除存储 | |||
| if len(VersionListTasks) > 0 { | |||
| DeleteJobStorage(VersionListTasks[0].JobName) | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job") | |||
| } | |||
| @@ -1477,3 +1541,21 @@ func ModelDownload(ctx *context.Context) { | |||
| } | |||
| http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently) | |||
| } | |||
| func DeleteJobStorage(jobName string) error { | |||
| //delete local | |||
| localJobPath := setting.JobPath + jobName | |||
| err := os.RemoveAll(localJobPath) | |||
| if err != nil { | |||
| log.Error("RemoveAll(%s) failed:%v", localJobPath, err) | |||
| } | |||
| //delete oss | |||
| dirPath := setting.CodePathPrefix + jobName + "/" | |||
| err = storage.ObsRemoveObject(setting.Bucket, dirPath) | |||
| if err != nil { | |||
| log.Error("ObsRemoveObject(%s) failed:%v", localJobPath, err) | |||
| } | |||
| return nil | |||
| } | |||
| @@ -605,7 +605,7 @@ func getContributorInfo(contributorInfos []*ContributorInfo, email string) *Cont | |||
| func Home(ctx *context.Context) { | |||
| if len(ctx.Repo.Units) > 0 { | |||
| //get repo contributors info | |||
| contributors, err := git.GetContributors(ctx.Repo.Repository.RepoPath()) | |||
| contributors, err := git.GetContributors(ctx.Repo.Repository.RepoPath(), ctx.Repo.BranchName) | |||
| if err == nil && contributors != nil { | |||
| startTime := time.Now() | |||
| var contributorInfos []*ContributorInfo | |||
| @@ -924,7 +924,9 @@ func ContributorsAPI(ctx *context.Context) { | |||
| count := 0 | |||
| errorCode := 0 | |||
| errorMsg := "" | |||
| contributors, err := git.GetContributors(ctx.Repo.Repository.RepoPath()) | |||
| branchOrTag := ctx.Query("name") | |||
| contributors, err := git.GetContributors(ctx.Repo.Repository.RepoPath(), branchOrTag) | |||
| var contributorInfos []*ContributorInfo | |||
| if err == nil && contributors != nil { | |||
| contributorInfoHash := make(map[string]*ContributorInfo) | |||
| @@ -315,6 +315,8 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
| }) | |||
| m.Get("/", routers.Home) | |||
| m.Get("/dashboard", routers.Dashboard) | |||
| go routers.SocketManager.Run() | |||
| m.Get("/action/notification", routers.ActionNotification) | |||
| m.Get("/recommend/org", routers.RecommendOrgFromPromote) | |||
| m.Get("/recommend/repo", routers.RecommendRepoFromPromote) | |||
| m.Group("/explore", func() { | |||
| @@ -625,6 +627,10 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
| m.Group("/org", func() { | |||
| m.Group("/:org", func() { | |||
| m.Get("/members", org.Members) | |||
| m.Group("/org_tag", func() { | |||
| m.Get("/repo_list", org.GetTagRepos) | |||
| m.Post("/repo_submit", bindIgnErr(auth.SubmitReposOfTagForm{}), org.SubmitTags) | |||
| }) | |||
| }, context.OrgAssignment()) | |||
| }) | |||
| m.Group("/org", func() { | |||
| @@ -962,10 +968,11 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
| m.Group("/cloudbrain", func() { | |||
| m.Group("/:jobid", func() { | |||
| m.Get("", reqRepoCloudBrainReader, repo.CloudBrainShow) | |||
| m.Get("/debug", reqRepoCloudBrainWriter, repo.CloudBrainDebug) | |||
| m.Get("/debug", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDebug) | |||
| m.Post("/commit_image", cloudbrain.AdminOrOwnerOrJobCreaterRight, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImage) | |||
| m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainStop) | |||
| m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDel) | |||
| m.Post("/restart", reqRepoCloudBrainWriter, repo.CloudBrainRestart) | |||
| m.Get("/rate", reqRepoCloudBrainReader, repo.GetRate) | |||
| m.Get("/models", reqRepoCloudBrainReader, repo.CloudBrainShowModels) | |||
| m.Get("/download_model", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDownloadModel) | |||
| @@ -975,6 +982,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
| }, context.RepoRef()) | |||
| m.Group("/modelmanage", func() { | |||
| m.Post("/create_model", reqRepoModelManageWriter, repo.SaveModel) | |||
| m.Post("/create_new_model", reqRepoModelManageWriter, repo.SaveNewNameModel) | |||
| m.Delete("/delete_model", repo.DeleteModel) | |||
| m.Put("/modify_model", repo.ModifyModelInfo) | |||
| m.Get("/show_model", reqRepoModelManageReader, repo.ShowModelTemplate) | |||
| @@ -999,8 +1007,8 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
| m.Group("/notebook", func() { | |||
| m.Group("/:jobid", func() { | |||
| m.Get("", reqRepoCloudBrainReader, repo.NotebookShow) | |||
| m.Get("/debug", reqRepoCloudBrainWriter, repo.NotebookDebug) | |||
| m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookStop) | |||
| m.Get("/debug", cloudbrain.AdminOrJobCreaterRight, repo.NotebookDebug) | |||
| m.Post("/:action", reqRepoCloudBrainWriter, repo.NotebookManage) | |||
| m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookDel) | |||
| }) | |||
| m.Get("/create", reqRepoCloudBrainWriter, repo.NotebookNew) | |||
| @@ -0,0 +1,50 @@ | |||
| package socketwrap | |||
| import ( | |||
| "code.gitea.io/gitea/models" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "github.com/gorilla/websocket" | |||
| ) | |||
// Client represents one connected websocket subscriber managed by a
// ClientsManager. Actions queued on Send are written to the peer by the
// client's WritePump goroutine; Send is closed by the manager when the
// client is unregistered or evicted.
type Client struct {
	Manager *ClientsManager      // hub that registers/unregisters this client
	Conn    *websocket.Conn      // underlying websocket connection
	Send    chan *models.Action  // outbound queue of actions to push to the peer
}
| func (c *Client) WritePump() { | |||
| defer func() { | |||
| c.Manager.Unregister <- c | |||
| c.Conn.Close() | |||
| }() | |||
| for { | |||
| select { | |||
| case message, ok := <-c.Send: | |||
| if !ok { | |||
| c.Conn.WriteMessage(websocket.CloseMessage, []byte{}) | |||
| log.Warn("send socket is closed") | |||
| return | |||
| } | |||
| log.Warn("socket:", message) | |||
| err := c.Conn.WriteJSON(message) | |||
| if err != nil { | |||
| log.Warn("can not send message", err) | |||
| return | |||
| } | |||
| n := len(c.Send) | |||
| for i := 0; i < n; i++ { | |||
| err = c.Conn.WriteJSON(<-c.Send) | |||
| if err != nil { | |||
| log.Warn("can not send message", err) | |||
| return | |||
| } | |||
| } | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,46 @@ | |||
| package socketwrap | |||
| import ( | |||
| "code.gitea.io/gitea/models" | |||
| ) | |||
// ClientsManager is the hub that tracks connected websocket clients and
// fans broadcast actions out to them (see Run).
type ClientsManager struct {
	Clients    map[*Client]bool // set of currently registered clients
	Register   chan *Client     // requests to add a client to the set
	Unregister chan *Client     // requests to remove a client from the set
}
| func NewClientsManager() *ClientsManager { | |||
| return &ClientsManager{ | |||
| Register: make(chan *Client), | |||
| Unregister: make(chan *Client), | |||
| Clients: make(map[*Client]bool), | |||
| } | |||
| } | |||
// LastTenActionsQueue holds the most recent broadcast actions (at most 10,
// oldest evicted first) — presumably so recent activity can be replayed to
// new subscribers; confirm against the consumers of this queue.
var LastTenActionsQueue = NewSyncQueue(10)
// Run is the manager's event loop. It serially handles three event sources:
//   - Register: add the client to the active set;
//   - Unregister: remove the client (if still present) and close its Send
//     channel, which terminates that client's WritePump goroutine;
//   - models.ActionChan: record the action in LastTenActionsQueue and fan it
//     out to every client; a client whose Send channel is not immediately
//     writable is treated as stalled and dropped on the spot.
//
// All mutations of h.Clients happen inside this single goroutine, so the map
// needs no extra locking.
func (h *ClientsManager) Run() {
	for {
		select {
		case client := <-h.Register:
			h.Clients[client] = true
		case client := <-h.Unregister:
			// The ok-check avoids a double close when the client was
			// already evicted by a failed broadcast below.
			if _, ok := h.Clients[client]; ok {
				delete(h.Clients, client)
				close(client.Send)
			}
		case message := <-models.ActionChan:
			LastTenActionsQueue.Push(message)
			for client := range h.Clients {
				select {
				case client.Send <- message:
				default:
					// Client can't keep up — drop it rather than block
					// the whole hub.
					close(client.Send)
					delete(h.Clients, client)
				}
			}
		}
	}
}
| @@ -0,0 +1,34 @@ | |||
| package socketwrap | |||
| import ( | |||
| "container/list" | |||
| "sync" | |||
| ) | |||
// SyncQueue is a mutex-guarded FIFO queue with a fixed capacity: once
// MaxSize elements are held, pushing a new element evicts the oldest one.
type SyncQueue struct {
	Queue   *list.List    // underlying FIFO storage; front is the oldest element
	Mutex   *sync.RWMutex // guards Queue (only exclusive locking is used here)
	MaxSize int           // maximum number of retained elements
}

// Push appends value to the back of the queue; if the queue is already at
// MaxSize, the oldest element (the front) is evicted so the length never
// exceeds MaxSize.
func (q *SyncQueue) Push(value interface{}) {
	q.Mutex.Lock()
	defer q.Mutex.Unlock()

	// The original duplicated PushBack in both branches of an if/else;
	// push unconditionally and evict once the cap is exceeded — same behavior.
	q.Queue.PushBack(value)
	if q.Queue.Len() > q.MaxSize {
		q.Queue.Remove(q.Queue.Front())
	}
}

// NewSyncQueue returns an empty SyncQueue that retains at most maxSize elements.
func NewSyncQueue(maxSize int) *SyncQueue {
	return &SyncQueue{
		Queue:   list.New(),
		Mutex:   &sync.RWMutex{},
		MaxSize: maxSize,
	}
}
| @@ -20,10 +20,11 @@ | |||
| </div> | |||
| <div class="ui container"> | |||
| <div class="ui stackable grid"> | |||
| {{template "org/navber" .}} | |||
| <div class="ui fourteen wide computer column"> | |||
| {{template "org/navber" .}} | |||
| {{template "org/select_pro" .}} | |||
| <div class="ui stackable grid"> | |||
| <div class="ui sixteen wide computer column"> | |||
| <div class="ui mobile reversed stackable grid"> | |||
| <div class="ui ten wide tablet eleven wide computer column"> | |||
| {{if .CanCreateOrgRepo}} | |||
| @@ -3,10 +3,11 @@ | |||
| {{template "org/header" .}} | |||
| <div class="ui container"> | |||
| {{template "base/alert" .}} | |||
| {{template "org/navber" .}} | |||
| <div class="ui stackable grid"> | |||
| {{template "org/navber" .}} | |||
| <div class="ui fourteen wide computer column list"> | |||
| <div class="ui sixteen wide computer column list"> | |||
| {{ range .Members}} | |||
| <div class="item ui grid"> | |||
| <div class="ui two wide column"> | |||
| @@ -1,4 +1,4 @@ | |||
| <div class="tablet only mobile only sixteen wide mobile sixteen wide tablet column row"> | |||
| <!-- <div class="tablet only mobile only sixteen wide mobile sixteen wide tablet column row"> | |||
| <div class="ui secondary pointing tabular top attached borderless menu navbar"> | |||
| <a class="{{if $.PageIsOrgHome}}active{{end}} item" href="{{.HomeLink}}"> | |||
| {{svg "octicon-home" 16}} {{$.i18n.Tr "org.home"}} | |||
| @@ -12,10 +12,10 @@ | |||
| </a> | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| </div> --> | |||
| <!--平板、移动端--> | |||
| <div class="computer only two wide computer column"> | |||
| <!-- <div class="computer only two wide computer column"> | |||
| <div class="ui grid"> | |||
| <div class="sixteen wide column ui secondary sticky pointing tabular vertical menu"> | |||
| {{with .Org}} | |||
| @@ -33,5 +33,37 @@ | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| </div> | |||
| <!--电脑、宽屏--> | |||
| </div> --> | |||
| <!--电脑、宽屏--> | |||
| <style> | |||
| .dis{ | |||
| margin-bottom: 10px; | |||
| } | |||
| .active{ | |||
| color:#0366D6 !important; | |||
| } | |||
| .mleft{ | |||
| margin-left: 30% !important; | |||
| } | |||
| .mbom{ | |||
| margin-bottom: 10px !important; | |||
| } | |||
| </style> | |||
| <div class="row"> | |||
| <div class="ui secondary pointing tabular top attached borderless menu navbar mbom"> | |||
| {{with .Org}} | |||
| <a class="{{if $.PageIsOrgHome}}active{{end}} item mleft" href="{{.HomeLink}}"> | |||
| {{svg "octicon-home" 16}} {{$.i18n.Tr "org.home"}} | |||
| </a> | |||
| {{end}} | |||
| <a class="{{if $.PageIsOrgMembers}}active{{end}} item" href="{{$.OrgLink}}/members"> | |||
| {{svg "octicon-organization" 16}} {{$.i18n.Tr "org.people"}} | |||
| </a> | |||
| {{if or ($.IsOrganizationMember) ($.IsOrganizationOwner)}} | |||
| <a class="{{if $.PageIsOrgTeams}}active{{end}} item" href="{{$.OrgLink}}/teams"> | |||
| {{svg "octicon-jersey" 16}} {{$.i18n.Tr "org.teams"}} | |||
| </a> | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| @@ -0,0 +1,319 @@ | |||
| <style> | |||
| .text-right{ | |||
| float:right !important; | |||
| } | |||
| .header{ | |||
| font-weight:bold; | |||
| font-size: 18px; | |||
| font-family: SourceHanSansSC-medium; | |||
| } | |||
| .cor{ | |||
| color:#0366D6 !important; | |||
| } | |||
| .header_card{ | |||
| /* color:#003A8C !important; */ | |||
| color:#0366D6 !important; | |||
| margin: 10px 0 0px 0; | |||
| height: 25px; | |||
| } | |||
| .marg{ | |||
| margin: 0 5px !important; | |||
| } | |||
| .content_list{ | |||
| max-height: 130px; | |||
| overflow: auto; | |||
| } | |||
| .Relist{ | |||
| color:#0366D6 !important; | |||
| } | |||
| .descript_height{ | |||
| color: #101010 !important; | |||
| margin: 10px 0; | |||
| height: 40px !important; | |||
| word-break:break-all; | |||
| line-height: 20px; | |||
| overflow: hidden; | |||
| /* overflow: hidden!important; | |||
| word-wrap:break-word!important; */ | |||
| } | |||
| .tags_height{ | |||
| height: 30px !important; | |||
| } | |||
| .full_height{ | |||
| height: 100%; | |||
| } | |||
| .omit{ | |||
| overflow: hidden; white-space: nowrap; text-overflow: ellipsis; | |||
| } | |||
| /deep/ ui.checkbox input[type=checkbox]::after{ | |||
| border: 1px solid #0366D6 !important; | |||
| } | |||
| .nowrap-2 { | |||
| /* height: 2.837em; */ | |||
| /* line-height: 1.4285em; */ | |||
| overflow: hidden; | |||
| overflow: hidden; | |||
| display: -webkit-box; | |||
| -webkit-line-clamp: 2; | |||
| -webkit-box-orient: vertical; | |||
| } | |||
| </style> | |||
| <div class="ui stackable grid"> | |||
| <div style="width: 100%;margin:15px 0;"> | |||
| {{if .tags}} | |||
| <span class="header"> | |||
| 精选项目 | |||
| </span> | |||
| <!-- {{.IsOrganizationOwner}} --> | |||
| {{if .IsOrganizationOwner}} | |||
| <a class="text-right" id="model" onclick="showcreate()" >{{svg "octicon-gear" 16}}自定义</a> | |||
| {{end}} | |||
| {{end}} | |||
| </div> | |||
| <div style="width: 100%;"> | |||
| {{ range .tags}} | |||
| {{if eq .TagName "精选项目"}} | |||
| <div class="ui three cards" style="margin-bottom: 10px;"> | |||
| {{ range .RepoList}} | |||
| <div class="card" > | |||
| <div class="extra full_height cor" > | |||
| <div class=" header header_card omit" > | |||
| <a class="header_card image poping up " href="{{.Link}}" data-content="{{.Name}}" data-position="top left" data-variation="tiny inverted"> {{.Name}}</a> | |||
| </div> | |||
| <div class='content descript_height nowrap-2'> | |||
| {{.Description}} | |||
| </div> | |||
| <div class="content " > | |||
| {{if .Topics }} | |||
| <div class=" tags " style="position: relative;"> | |||
| {{range .Topics}} | |||
| {{if ne . "" }}<a style="max-width:100%;margin: 5px 0;display:inline-flex;" href="{{AppSubUrl}}/explore/repos?q={{.}}&topic={{$.Topic}}" ><span class="ui small label topic omit" >{{.}}</span></a>{{end}} | |||
| {{end}} | |||
| </div> | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| <div class=" extra " style="color:#888888;border-top: none !important"> | |||
| <div class="ui mini right compact marg" > | |||
| <a class="item marg "> | |||
| {{svg "octicon-eye" 16}} {{.NumWatches}} | |||
| </a> | |||
| <a class="item marg"> | |||
| {{svg "octicon-star" 16}} {{.NumStars}} | |||
| </a> | |||
| <a class="item marg"> | |||
| {{svg "octicon-git-branch" 16}} {{.NumForks}} | |||
| </a> | |||
| </div> | |||
| </div> | |||
| </div> | |||
| {{end}} | |||
| </div> | |||
| {{end}} | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| <div class="ui modal"> | |||
| <div class="header" style="padding: 1rem;background-color: rgba(240, 240, 240, 100);"> | |||
| <h4 id="model_header">自定义精选项目</h4> | |||
| </div> | |||
| <div class="content content-padding" style="color: black;"> | |||
| <p>最多可选9个公开项目</p> | |||
| <div class="ui search" > | |||
| <div class="ui input" style="width: 100%;"> | |||
| <input type="text" id = 'search_selectPro' placeholder="Search ..." value = '' oninput="search()"> | |||
| </div> | |||
| </div> | |||
| <div style="margin: 10px ;"> | |||
| <div id ='org_list' style="margin-bottom: 20px;"class="content_list" > | |||
| </div> | |||
| </div> | |||
| <p id='recommend'></p> | |||
| <div class="inline field" style="margin-left: 37%;"> | |||
| <div class="actions"> | |||
| <button id="submitId" type="button" class="ui create_train_job green deny button" onclick="saveSeletedPro(1)"> | |||
| {{.i18n.Tr "explore.save"}} | |||
| </button> | |||
| <button class="ui button cancel" >{{.i18n.Tr "explore.cancel"}}</button> | |||
| </div> | |||
| </div> | |||
| </div> | |||
| </div> | |||
| <script> | |||
| var data; | |||
| var filterData=[]; | |||
| var num=0; | |||
| function showcreate(obj){ | |||
| document.getElementById("search_selectPro").value='' | |||
| $('.ui.modal') | |||
| .modal({ | |||
| centered: false, | |||
| onShow:function(){ | |||
| $("#org_list").empty() | |||
| getPro(1) | |||
| }, | |||
| onHide:function(){ | |||
| } | |||
| }) | |||
| .modal('show') | |||
| } | |||
| function getPro(typeTag){ | |||
| $.ajax({ | |||
| type:"GET", | |||
| url:"/org/{{.Org.Name}}/org_tag/repo_list?tagId="+typeTag, | |||
| dataType:"json", | |||
| async:false, | |||
| success:function(json){ | |||
| data = json.data; | |||
| var n_length = data.length | |||
| pro_html = getHTML(data) | |||
| $("#org_list").append(pro_html) | |||
| // console.log('原始',data) | |||
| checkedNum(0) | |||
| } | |||
| }); | |||
| } | |||
| function getHTML(data){ | |||
| let pro_html='' | |||
| for (let i=0;i<data.length;i++){ | |||
| if (data[i].Selected==true){ | |||
| console.log("data[i]:",data[i]) | |||
| pro_html += `<div class="ui checkbox" style="width: 33%;margin-bottom:10px" > <input type="checkbox" id = " ${i}" checked="" onclick="checkedNum(${i})" class="Relist" name ='select_pro_name' data-repoid="${data[i].RepoID}" data-reponame="${data[i].RepoName}" data-selected=${data[i].Selected} > <label class='omit image poping up' data-content=${data[i].RepoName} data-position="top left " data-variation="mini"> ${data[i].RepoName}</label></div>` | |||
| pro_html += '</div>' | |||
| } | |||
| else{ | |||
| pro_html += `<div class="ui checkbox" style="width: 33%;margin-bottom:10px" > <input type="checkbox" id = "${i}" onclick="checkedNum(${i})" class="Relist" name ='select_pro_name' data-repoid="${data[i].RepoID}" data-reponame="${data[i].RepoName}" data-selected= ${data[i].Selected}> <label class='omit image poping up' data-content=${data[i].RepoName} data-position="top left " data-variation="mini"> ${data[i].RepoName} </label></div>` | |||
| pro_html += '</div>' | |||
| } | |||
| } | |||
| return pro_html | |||
| } | |||
| function saveSeletedPro(typeTag){ | |||
| var saveData=[]; | |||
| $('input[name="select_pro_name"]:checked').each(function(){ | |||
| console.log('值',this.dataset.repoid) | |||
| saveData.push(parseInt(this.dataset.repoid)); | |||
| }) | |||
| if(saveData.length>9){ | |||
| alert("最多可选9个,保存失败") | |||
| return | |||
| } | |||
| // saveData = getSelecteDataID(); | |||
| // console.log("数据:",saveData) | |||
| $.ajax({ | |||
| type:"POST", | |||
| url:"/org/{{.Org.Name}}/org_tag/repo_submit?tagId="+typeTag, | |||
| contentType:'application/json', | |||
| dataType:"json", | |||
| async:false, | |||
| data:JSON.stringify({'repoList':saveData | |||
| }), | |||
| success:function(res){ | |||
| console.log('保存成功'); | |||
| location.reload() | |||
| } | |||
| }); | |||
| } | |||
// Collect the currently-checked repository checkboxes into an array of
// {RepoID, RepoName, Selected} objects, read back from the data-* attributes
// that getHTML stamped onto each input.
function getSelecteData(){
var selectedData=[];
$('input[name="select_pro_name"]:checked').each(function(){
// console.log(this)
// console.log('值',this.dataset.selected)
selectedData.push({"RepoID":parseInt(this.dataset.repoid),"RepoName":this.dataset.reponame,"Selected":JSON.parse(this.dataset.selected)});
})
return selectedData
}
| function search(){ | |||
| var selectedData = getSelecteData(); | |||
| var searchValue = document.getElementById("search_selectPro").value; | |||
| filterData=[]; | |||
| console.log("searchValue:",searchValue) | |||
| for (let i=0;i<data.length;i++){ | |||
| var isInclude=false; | |||
| if(data[i].RepoName.toLowerCase().includes(searchValue.toLowerCase())){ | |||
| filterData.push(data[i]) | |||
| } | |||
| } | |||
| console.log("选中的值:",selectedData) | |||
| console.log("筛选包括选中的值:",filterData) | |||
| var showData=[]; | |||
| for(i=0;i<selectedData.length;i++){ | |||
| filterData =filterData.filter((item)=>{ | |||
| return item.RepoID!=selectedData[i].RepoID | |||
| }); | |||
| } | |||
| console.log("筛选后不包括选中的值:",filterData) | |||
| $("#org_list").empty() | |||
| if(searchValue!=""){ | |||
| if (filterData.length!=0){ | |||
| var pro_html = getHTML(selectedData); | |||
| console.log("selectedData_pro_html:",pro_html) | |||
| $("#org_list").append(pro_html) | |||
| pro_html= getHTML(filterData); | |||
| $("#org_list").append(pro_html) | |||
| }else{ | |||
| var pro_html = getHTML(selectedData); | |||
| $("#org_list").append(pro_html) | |||
| } | |||
| }else{ | |||
| var pro_html = getHTML(data); | |||
| $("#org_list").append(pro_html) | |||
| } | |||
| } | |||
// Enforce the 9-repository selection cap: count the checked boxes, revert
// the just-toggled checkbox (identified by element id) when the count would
// exceed 9, and otherwise refresh the remaining-slots hint under the list.
function checkedNum(id){
num=0;
var inputs = document.getElementsByName("select_pro_name")
for (var i=0;i<inputs.length;i++){
if(inputs[i].checked){
num++
if(num>9){
// Over the cap: uncheck the box that triggered this call and warn the user.
document.getElementById(id).checked=false
alert("选择超过9个,请重新选择!")
return
}
}
}
// Show how many more repositories may still be selected.
var show_num = 9-num;
document.getElementById("recommend").innerHTML="还能推荐"+show_num+"个"
}
| </script> | |||
| @@ -3,12 +3,12 @@ | |||
| {{template "org/header" .}} | |||
| <div class="ui container"> | |||
| {{template "base/alert" .}} | |||
| {{template "org/navber" .}} | |||
| <div class="ui stackable grid"> | |||
| {{template "org/navber" .}} | |||
| <div class="ui fourteen wide computer column list"> | |||
| <div class="ui sixteen wide computer column list"> | |||
| <div class="ui two column grid"> | |||
| {{range .Teams}} | |||
| <div class="column"> | |||
| @@ -93,6 +93,14 @@ | |||
| display: none; | |||
| } | |||
| .icons{ | |||
| /* position: absolute !important; | |||
| right: 150px; | |||
| top: 14px; | |||
| z-index: 2; */ | |||
| } | |||
| </style> | |||
| <div id="mask"> | |||
| @@ -182,9 +190,10 @@ | |||
| </select> | |||
| </div> | |||
| <div class="inline required field"> | |||
| <div class="inline required field" style="position: relative;"> | |||
| <label>镜像</label> | |||
| <input type="text" list="cloudbrain_image" placeholder="选择镜像" name="image" required autofocus maxlength="254"> | |||
| <i class="times circle outline icon icons" style="visibility: hidden;" onclick="clearValue()"></i> | |||
| <datalist class="ui search" id="cloudbrain_image" style='width:385px;' name="image"> | |||
| {{range .images}} | |||
| <option name="image" value="{{.Place}}">{{.PlaceView}}</option> | |||
| @@ -261,9 +270,17 @@ | |||
| <script> | |||
| let form = document.getElementById('form_id'); | |||
| $('#messageInfo').css('display','none') | |||
| let inputs = document.querySelectorAll('input[list]'); | |||
| inputs[0].addEventListener('change', function() { | |||
| $(".icon.icons").css("visibility","visible") | |||
| }); | |||
| $('#messageInfo').css('display','none') | |||
| function clearValue(){ | |||
| context=inputs[0] | |||
| context.value='' | |||
| $(".icon.icons").css("visibility","hidden") | |||
| } | |||
| form.onsubmit = function(e){ | |||
| let value_task = $("input[name='job_name']").val() | |||
| let value_image = $("input[name='image']").val() | |||
| @@ -202,14 +202,17 @@ | |||
| <div class="rect5"></div> | |||
| </div> | |||
| </div> | |||
| <!-- 提示框 --> | |||
| <div class="alert"></div> | |||
| <div class="alert"></div> | |||
| <div class="repository release dataset-list view"> | |||
| {{template "repo/header" .}} | |||
| {{template "base/alert" .}} | |||
| <!-- 提示框 --> | |||
| <!-- 列表容器 --> | |||
| <div class="ui container"> | |||
| <div class="ui two column stackable grid "> | |||
| <div class="ui two column stackable grid"> | |||
| <div class="column"> | |||
| <div class="ui blue small menu compact selectcloudbrain"> | |||
| <a class="active item" href="{{.RepoLink}}/debugjob?debugListType=all">{{$.i18n.Tr "repo.modelarts.notebook"}}</a> | |||
| @@ -282,7 +285,7 @@ | |||
| <div class="row"> | |||
| <!-- 任务名 --> | |||
| <div class="four wide column"> | |||
| <a class="title" href='{{if eq .ComputeResource "CPU/GPU"}}{{$.RepoLink}}/cloudbrain{{else}}{{$.RepoLink}}/modelarts/notebook{{end}}/{{.JobID}}' title="{{.JobName}}" style="font-size: 14px;"> | |||
| <a class="title" href='{{if eq .ComputeResource "CPU/GPU"}}{{$.RepoLink}}/cloudbrain{{else}}{{$.RepoLink}}/modelarts/notebook{{end}}/{{.JobID}}' title="{{.JobName}}" style="font-size: 14px;"> | |||
| <span class="fitted text_over" style="width: 90%;vertical-align: middle;">{{.JobName}}</span> | |||
| </a> | |||
| </div> | |||
| @@ -315,34 +318,49 @@ | |||
| </a> | |||
| {{end}} --> | |||
| <!-- 调试 --> | |||
| {{if .CanDebug}} | |||
| {{if eq .ComputeResource "CPU/GPU"}} | |||
| <a id="model-debug-{{.JobID}}" class='ui basic {{if ne .Status "RUNNING"}} disabled {{else}}blue {{end}}button' href="{{$.RepoLink}}/cloudbrain/{{.JobID}}/debug" target="_blank"> | |||
| {{$.i18n.Tr "repo.debug"}} | |||
| </a> | |||
| <form id="debugAgainForm-{{.JobID}}"> | |||
| {{$.CsrfTokenHtml}} | |||
| {{if .CanDebug}} | |||
| {{if eq .Status "RUNNING"}} | |||
| <a style="margin: 0 1rem;" id="model-debug-{{.JobID}}" class='ui basic blue button' onclick='debugAgain("{{.JobID}}","{{if eq .ComputeResource "CPU/GPU"}}{{$.RepoLink}}/cloudbrain{{else}}{{$.RepoLink}}/modelarts/notebook{{end}}/{{.JobID}}/")'> | |||
| {{$.i18n.Tr "repo.debug"}} | |||
| </a> | |||
| {{else}} | |||
| <a id="model-debug-{{.JobID}}" class='ui basic {{if eq .Status "CREATING" "STOPPING" "WAITING" "STARTING"}} disabled {{else}}blue {{end}}button' onclick='debugAgain("{{.JobID}}","{{if eq .ComputeResource "CPU/GPU"}}{{$.RepoLink}}/cloudbrain{{else}}{{$.RepoLink}}/modelarts/notebook{{end}}/{{.JobID}}/")'> | |||
| {{$.i18n.Tr "repo.debug_again"}} | |||
| </a> | |||
| {{end}} | |||
| {{else}} | |||
| <a id="model-debug-{{.JobID}}" class='ui basic {{if ne .Status "RUNNING"}} disabled {{else}}blue {{end}}button' href="{{$.RepoLink}}/modelarts/notebook/{{.JobID}}/debug" target="_blank"> | |||
| {{$.i18n.Tr "repo.debug"}} | |||
| </a> | |||
| {{if eq .Status "RUNNING"}} | |||
| <a class="ui basic disabled button"> | |||
| {{$.i18n.Tr "repo.debug"}} | |||
| </a> | |||
| {{else}} | |||
| <a class="ui basic disabled button"> | |||
| {{$.i18n.Tr "repo.debug_again"}} | |||
| </a> | |||
| {{end}} | |||
| {{end}} | |||
| {{else}} | |||
| <a class="ui basic disabled button"> | |||
| {{$.i18n.Tr "repo.debug"}} | |||
| </a> | |||
| {{end}} | |||
| </form> | |||
| <!-- 停止 --> | |||
| <form id="stopForm-{{.JobID}}" action="{{if eq .ComputeResource "CPU/GPU"}}{{$.RepoLink}}/cloudbrain{{else}}{{$.RepoLink}}/modelarts/notebook{{end}}/{{.JobID}}/stop" method="post" style="margin-left:-1px;"> | |||
| <form id="stopForm-{{.JobID}}" style="margin-left:-1px;"> | |||
| {{$.CsrfTokenHtml}} | |||
| {{if .CanDel}} | |||
| <a id="stop-model-debug-{{.JobID}}" class='ui basic {{if eq .Status "STOPPED" "FAILED" "START_FAILED" "STOPPING" "CREATING" "STARTING"}}disabled {{else}}blue {{end}}button' onclick="document.getElementById('stopForm-{{.JobID}}').submit();"> | |||
| {{$.i18n.Tr "repo.stop"}} | |||
| </a> | |||
| {{if eq .ComputeResource "CPU/GPU" }} | |||
| <a id="stop-model-debug-{{.JobID}}" class='ui basic {{if eq .Status "STOPPED" "FAILED" "START_FAILED" "STOPPING" "CREATING" "STARTING"}}disabled {{else}}blue {{end}}button' onclick='stopDebug("{{.JobID}}","{{$.RepoLink}}/cloudbrain/{{.JobID}}/stop")'> | |||
| {{$.i18n.Tr "repo.stop"}} | |||
| </a> | |||
| {{else}} | |||
| <a id="stop-model-debug-{{.JobID}}" class='ui basic {{if eq .Status "STOPPED" "FAILED" "START_FAILED" "STOPPING" "CREATING" "STARTING"}}disabled {{else}}blue {{end}}button' onclick='stopDebug("{{.JobID}}","{{$.RepoLink}}/modelarts/notebook/{{.JobID}}/stop")'> | |||
| {{$.i18n.Tr "repo.stop"}} | |||
| </a> | |||
| {{end}} | |||
| {{else}} | |||
| <a class="ui basic disabled button" onclick="document.getElementById('stopForm-{{.JobID}}').submit();"> | |||
| <a class="ui basic disabled button"> | |||
| {{$.i18n.Tr "repo.stop"}} | |||
| </a> | |||
| {{end}} | |||
| <input type="hidden" name="debugListType" value="all"> | |||
| </form> | |||
| <!-- 删除 --> | |||
| <form id="delForm-{{.JobID}}" action="{{if eq .ComputeResource "CPU/GPU"}}{{$.RepoLink}}/cloudbrain{{else}}{{$.RepoLink}}/modelarts/notebook{{end}}/{{.JobID}}/del" method="post"> | |||
| @@ -384,6 +402,13 @@ | |||
| <a class="ui basic disabled button">{{$.i18n.Tr "repo.download"}}</a> | |||
| {{end}} | |||
| </div> | |||
| {{if and (ne .JobType "DEBUG") (eq .Cloudbrain.Type 0)}} | |||
| <div class="item" style="padding: 0 !important;"> | |||
| <a class="ui basic blue button" href="{{$.RepoLink}}/cloudbrain/{{.JobID}}/rate" target="_blank"> | |||
| 评分 | |||
| </a> | |||
| </div> | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| @@ -463,10 +488,21 @@ | |||
| <script> | |||
| // 调试和评分新开窗口 | |||
| const {AppSubUrl, StaticUrlPrefix, csrf} = window.config; | |||
| let url={{.RepoLink}} | |||
| let getParam=location.search.split('?debugListType=').pop() | |||
| let getParam=getQueryVariable('debugListType') | |||
| let dropdownValue = getParam==='all'||getParam==='' ? '全部' : getParam | |||
| localStorage.setItem('all',location.href) | |||
| function getQueryVariable(variable) | |||
| { | |||
| let query = window.location.search.substring(1); | |||
| let vars = query.split("&"); | |||
| for (let i=0;i<vars.length;i++) { | |||
| let pair = vars[i].split("="); | |||
| if(pair[0] == variable){return pair[1];} | |||
| } | |||
| return(false); | |||
| } | |||
| function stop(obj) { | |||
| if (obj.style.color != "rgb(204, 204, 204)") { | |||
| obj.target = '_blank' | |||
| @@ -489,6 +525,7 @@ | |||
| onApprove: function() { | |||
| document.getElementById(delId).submit() | |||
| flag = true | |||
| $('.alert').html('操作成功!').removeClass('alert-danger').addClass('alert-success').show().delay(1500).fadeOut(); | |||
| }, | |||
| onHidden: function() { | |||
| if (flag == false) { | |||
| @@ -499,7 +536,67 @@ | |||
| .modal('show') | |||
| } | |||
| } | |||
| function debugAgain(JobID,debugUrl){ | |||
| if($('#' + JobID+ '-text').text()==="RUNNING"){ | |||
| window.open(debugUrl+'debug') | |||
| }else{ | |||
| $.ajax({ | |||
| type:"POST", | |||
| url:debugUrl+'restart', | |||
| data:$('#debugAgainForm-'+JobID).serialize(), | |||
| success:function(res){ | |||
| if(res.result_code==="0"){ | |||
| if(res.job_id!==JobID){ | |||
| location.reload() | |||
| }else{ | |||
| $('#' + JobID+'-icon').removeClass().addClass(res.status) | |||
| $('#' + JobID+ '-text').text(res.status) | |||
| $('#model-debug-'+JobID).removeClass('blue').addClass('disabled') | |||
| $('#model-delete-'+JobID).removeClass('blue').addClass('disabled') | |||
| } | |||
| }else{ | |||
| $('.alert').html(res.error_msg).removeClass('alert-success').addClass('alert-danger').show().delay(2000).fadeOut(); | |||
| } | |||
| }, | |||
| error :function(res){ | |||
| console.log(res) | |||
| } | |||
| }) | |||
| } | |||
| } | |||
| function stopDebug(JobID,stopUrl){ | |||
| $.ajax({ | |||
| type:"POST", | |||
| url:stopUrl, | |||
| data:$('#stopForm-'+JobID).serialize(), | |||
| success:function(res){ | |||
| if(res.result_code==="0"){ | |||
| $('#' + JobID+'-icon').removeClass().addClass(res.status) | |||
| $('#' + JobID+ '-text').text(res.status) | |||
| if(res.status==="STOPPED"){ | |||
| $('#model-debug-'+JobID).removeClass('disabled').addClass('blue').text("再次调试").css("margin","0") | |||
| $('#model-image-'+JobID).removeClass('blue').addClass('disabled') | |||
| $('#stop-model-debug-'+JobID).removeClass('blue').addClass('disabled') | |||
| $('#model-delete-'+JobID).removeClass('disabled').addClass('blue') | |||
| } | |||
| else{ | |||
| $('#model-debug-'+JobID).removeClass('blue').addClass('disabled') | |||
| $('#stop-model-debug-'+JobID).removeClass('blue').addClass('disabled') | |||
| } | |||
| }else{ | |||
| $('.alert').html(res.error_msg).removeClass('alert-success').addClass('alert-danger').show().delay(2000).fadeOut(); | |||
| } | |||
| }, | |||
| error :function(res){ | |||
| console.log(res) | |||
| } | |||
| }) | |||
| } | |||
| // 加载任务状态 | |||
| var timeid = window.setInterval(loadJobStatus, 15000); | |||
| $(document).ready(loadJobStatus); | |||
| @@ -508,8 +605,9 @@ | |||
| const jobID = job.dataset.jobid; | |||
| const repoPath = job.dataset.repopath; | |||
| const computeResource = job.dataset.resource | |||
| const initArray = ['STOPPED','FAILED','START_FAILED','CREATE_FAILED'] | |||
| const initArray = ['STOPPED','FAILED','START_FAILED','CREATE_FAILED','SUCCEEDED'] | |||
| if (initArray.includes(job.textContent.trim())) { | |||
| return | |||
| } | |||
| const diffResource = computeResource == "NPU" ? 'modelarts/notebook' : 'cloudbrain' | |||
| @@ -521,32 +619,30 @@ | |||
| $('#' + jobID+ '-text').text(status) | |||
| } | |||
| if(status==="RUNNING"){ | |||
| $('#model-debug-'+jobID).removeClass('disabled') | |||
| $('#model-debug-'+jobID).addClass('blue') | |||
| $('#model-image-'+jobID).removeClass('disabled') | |||
| $('#model-image-'+jobID).addClass('blue') | |||
| $('#model-debug-'+jobID).removeClass('disabled').addClass('blue').text('调试').css("margin","0 1rem") | |||
| $('#model-image-'+jobID).removeClass('disabled').addClass('blue') | |||
| } | |||
| if(status!=="RUNNING"){ | |||
| $('#model-debug-'+jobID).removeClass('blue') | |||
| $('#model-debug-'+jobID).addClass('disabled') | |||
| $('#model-image-'+jobID).removeClass('blue') | |||
| $('#model-image-'+jobID).addClass('disabled') | |||
| // $('#model-debug-'+jobID).removeClass('blue') | |||
| // $('#model-debug-'+jobID).addClass('disabled') | |||
| $('#model-image-'+jobID).removeClass('blue').addClass('disabled') | |||
| } | |||
| if(["CREATING","STOPPING","WAITING","STARTING"].includes(status)){ | |||
| $('#model-debug-'+jobID).removeClass('blue').addClass('disabled') | |||
| } | |||
| if(['STOPPED','FAILED','START_FAILED','CREATE_FAILED','SUCCEEDED'].includes(status)){ | |||
| $('#model-debug-'+jobID).removeClass('disabled').addClass('blue').text('再次调试').css("margin","0") | |||
| } | |||
| if(["RUNNING","WAITING"].includes(status)){ | |||
| $('#stop-model-debug-'+jobID).removeClass('disabled') | |||
| $('#stop-model-debug-'+jobID).addClass('blue') | |||
| $('#stop-model-debug-'+jobID).removeClass('disabled').addClass('blue') | |||
| } | |||
| if(["CREATING","STOPPING","STARTING","STOPPED","FAILED","START_FAILED"].includes(status)){ | |||
| $('#stop-model-debug-'+jobID).removeClass('blue') | |||
| $('#stop-model-debug-'+jobID).addClass('disabled') | |||
| $('#stop-model-debug-'+jobID).removeClass('blue').addClass('disabled') | |||
| } | |||
| if(status==="STOPPED" || status==="FAILED"|| status==="START_FAILED"){ | |||
| $('#model-delete-'+jobID).removeClass('disabled') | |||
| $('#model-delete-'+jobID).addClass('blue') | |||
| $('#model-delete-'+jobID).removeClass('disabled').addClass('blue') | |||
| }else{ | |||
| $('#model-delete-'+jobID).removeClass('blue') | |||
| $('#model-delete-'+jobID).addClass('disabled') | |||
| $('#model-delete-'+jobID).removeClass('blue').addClass('disabled') | |||
| } | |||
| }).fail(function(err) { | |||
| console.log(err); | |||
| @@ -554,6 +650,7 @@ | |||
| }); | |||
| }; | |||
| $(document).ready(function(){ | |||
| dropdownValue = dropdownValue==="CPU%2FGPU"? 'CPU/GPU' : dropdownValue | |||
| $('.default.text').text(dropdownValue) | |||
| $('.ui.dropdown') | |||
| .dropdown({ | |||
| @@ -564,6 +661,12 @@ | |||
| location.href = `${url}/debugjob?debugListType=${value}` | |||
| } | |||
| }) | |||
| $('.message .close') | |||
| .on('click', function() { | |||
| $(this) | |||
| .closest('.message') | |||
| .transition('fade') | |||
| }) | |||
| }) | |||
| @@ -601,7 +704,6 @@ | |||
| // 显示弹窗,弹出相应的信息 | |||
| function showmask() { | |||
| var image_tag = !$('#image_tag').val() | |||
| console.log("image_tag",image_tag) | |||
| if(image_tag){ | |||
| return | |||
| } | |||
| @@ -612,8 +714,6 @@ | |||
| var responseText = $("iframe")[0].contentDocument.body.getElementsByTagName("pre")[0].innerHTML; | |||
| var json1 = JSON.parse(responseText) | |||
| $('#mask').css('display', 'none') | |||
| parent.location.href | |||
| if (json1.result_code === "0") { | |||
| $('.alert').html('操作成功!').removeClass('alert-danger').addClass('alert-success').show().delay(1500).fadeOut(); | |||
| } else { | |||
| @@ -337,7 +337,8 @@ | |||
| {{end}} | |||
| <div class="ui right"> | |||
| <a class="membersmore text grey" href="{{.RepoLink}}/contributors">全部 {{svg "octicon-chevron-right" 16}}</a> | |||
| <!-- <a class="membersmore text grey" href="{{.RepoLink}}/contributors">全部 {{svg "octicon-chevron-right" 16}}</a> --> | |||
| <a class="membersmore text grey" href="{{.RepoLink}}/contributors?type={{if .IsViewBranch}}branch{{else}}tag{{end}}&name={{.BranchName}}">全部 {{svg "octicon-chevron-right" 16}}</a> | |||
| </div> | |||
| </h4> | |||
| <div class="ui members" id="contributorInfo"> | |||
| @@ -1,3 +1,12 @@ | |||
| <style> | |||
| .repository .filter.menu.labels .svg{ | |||
| margin-right: 2px !important; | |||
| } | |||
| .ovfl{ | |||
| overflow-y:hidden !important; | |||
| min-width: 140px!important; | |||
| } | |||
| </style> | |||
| {{template "base/head" .}} | |||
| <div class="repository"> | |||
| {{template "repo/header" .}} | |||
| @@ -57,16 +66,41 @@ | |||
| </div> | |||
| <!-- Milestone --> | |||
| <div class="ui {{if not .Milestones}}disabled{{end}} dropdown jump item"> | |||
| <div class="ui {{if and (not .OpenMilestones) (not .ClosedMilestones)}}disabled{{end}} dropdown jump item"> | |||
| <span class="text"> | |||
| {{.i18n.Tr "repo.issues.filter_milestone"}} | |||
| <i class="dropdown icon"></i> | |||
| </span> | |||
| <div class="menu"> | |||
| <a class="item" href="{{$.Link}}?q={{$.Keyword}}&type={{$.ViewType}}&sort={{$.SortType}}&state={{$.State}}&labels={{.SelectLabels}}&assignee={{$.AssigneeID}}">{{.i18n.Tr "repo.issues.filter_milestone_no_select"}}</a> | |||
| {{range .Milestones}} | |||
| <a class="{{if eq $.MilestoneID .ID}}active selected{{end}} item" href="{{$.Link}}?type={{$.ViewType}}&sort={{$.SortType}}&state={{$.State}}&labels={{$.SelectLabels}}&milestone={{.ID}}&assignee={{$.AssigneeID}}">{{.Name}}</a> | |||
| {{end}} | |||
| <a class="item" href="{{$.Link}}?q={{$.Keyword}}&type={{$.ViewType}}&sort={{$.SortType}}&state={{$.State}}&labels={{.SelectLabels}}&milestone=-1&assignee={{$.AssigneeID}}">{{.i18n.Tr "repo.issues.filter_milestone_no_add"}}</a> | |||
| {{if .OpenMilestones}} | |||
| <div class="divider" ></div> | |||
| <div class="header ovfl" > | |||
| {{svg "octicon-milestone" 12 }} | |||
| {{.i18n.Tr "repo.issues.new.open_milestone"}} | |||
| </div> | |||
| {{range .OpenMilestones}} | |||
| <a class="{{if eq $.MilestoneID .ID}}active selected{{end}} item" href="{{$.Link}}?type={{$.ViewType}}&sort={{$.SortType}}&state={{$.State}}&labels={{$.SelectLabels}}&milestone={{.ID}}&assignee={{$.AssigneeID}}"> | |||
| {{.Name}} | |||
| </a> | |||
| {{end}} | |||
| {{end}} | |||
| {{if .ClosedMilestones}} | |||
| <div class="divider"></div> | |||
| <div class="header ovfl" > | |||
| {{svg "octicon-milestone" 12}} | |||
| {{.i18n.Tr "repo.issues.new.closed_milestone"}} | |||
| </div> | |||
| {{range .ClosedMilestones}} | |||
| <a class="{{if eq $.MilestoneID .ID}}active selected{{end}} item" href="{{$.Link}}?type={{$.ViewType}}&sort={{$.SortType}}&state={{$.State}}&labels={{$.SelectLabels}}&milestone={{.ID}}&assignee={{$.AssigneeID}}"> | |||
| {{.Name}} | |||
| </a> | |||
| {{end}} | |||
| {{end}} | |||
| </div> | |||
| </div> | |||
| @@ -121,7 +121,7 @@ | |||
| </div> | |||
| <!-- 任务运行时间 --> | |||
| <div class="two wide column text center padding0"> | |||
| <span style="font-size: 12px;" id="duration-{{.JobID}}"></span> | |||
| <span style="font-size: 12px;" id="duration-{{.JobID}}">{{.TrainJobDuration}}</span> | |||
| </div> | |||
| <!-- 计算资源 --> | |||
| <div class="two wide column text center padding0"> | |||
| @@ -253,35 +253,18 @@ | |||
| } | |||
| } | |||
| function loadJobDuration() { | |||
| $(".job-status").each((index, job) => { | |||
| const jobID = job.dataset.jobid; | |||
| const repoPath = job.dataset.repopath; | |||
| const versionname = job.dataset.version | |||
| $.get(`/api/v1/repos/${repoPath}/modelarts/train-job/${jobID}?version_name=${versionname}`, (data) => { | |||
| console.log(data) | |||
| const duration = data.JobDuration | |||
| const jobID = data.JobID | |||
| $('#duration-'+jobID).text(duration) | |||
| }) | |||
| }) | |||
| } | |||
| $(document).ready(loadJobDuration); | |||
| // 加载任务状态 | |||
| var timeid = window.setInterval(loadJobStatus, 15000); | |||
| $(document).ready(loadJobStatus); | |||
| function loadJobStatus() { | |||
| $(".job-status").each((index, job) => { | |||
| const jobID = job.dataset.jobid; | |||
| const repoPath = job.dataset.repopath; | |||
| const jobID = job.dataset.jobid | |||
| const repoPath = job.dataset.repopath | |||
| const versionname = job.dataset.version | |||
| if (job.textContent.trim() == 'IMAGE_FAILED' || job.textContent.trim() == 'SUBMIT_FAILED' || job.textContent.trim() == 'DELETE_FAILED' | |||
| || job.textContent.trim() == 'KILLED' || job.textContent.trim() == 'COMPLETED' || job.textContent.trim() == 'FAILED' | |||
| || job.textContent.trim() == 'CANCELED' || job.textContent.trim() == 'LOST') { | |||
| return | |||
| const status_text = $(`#${jobID}-text`).text() | |||
| if(['IMAGE_FAILED','SUBMIT_FAILED','DELETE_FAILED','KILLED','COMPLETED','FAILED','CANCELED','LOST','START_FAILED'].includes(status_text)){ | |||
| return | |||
| } | |||
| $.get(`/api/v1/repos/${repoPath}/modelarts/train-job/${jobID}?version_name=${versionname}`, (data) => { | |||
| const jobID = data.JobID | |||
| const status = data.JobStatus | |||
| @@ -329,7 +312,6 @@ | |||
| } | |||
| } | |||
| function stopVersion(version_name,jobID){ | |||
| const url = '/api/v1/repos/{{$.RepoRelPath}}/modelarts/train-job/'+jobID+'/stop_version' | |||
| $.post(url,{version_name:version_name},(data)=>{ | |||
| if(data.StatusOK===0){ | |||
| @@ -450,7 +450,6 @@ td, th { | |||
| {{template "base/footer" .}} | |||
| <script> | |||
| console.log({{.version_list_task}}) | |||
| $('.menu .item').tab() | |||
| $(document).ready(function(){ | |||
| @@ -495,14 +494,19 @@ td, th { | |||
| } | |||
| function loadJobStatus() { | |||
| $(".ui.accordion.border-according").each((index, job) => { | |||
| const jobID = job.dataset.jobid; | |||
| const repoPath = job.dataset.repopath; | |||
| const versionname = job.dataset.version | |||
| if (job.textContent.trim() == 'IMAGE_FAILED' || job.textContent.trim() == 'SUBMIT_FAILED' || job.textContent.trim() == 'DELETE_FAILED' | |||
| || job.textContent.trim() == 'KILLED' || job.textContent.trim() == 'COMPLETED' || job.textContent.trim() == 'FAILED' | |||
| || job.textContent.trim() == 'CANCELED' || job.textContent.trim() == 'LOST') { | |||
| return | |||
| // ['IMAGE_FAILED','SUBMIT_FAILED','DELETE_FAILED','KILLED','COMPLETED','FAILED','CANCELED','LOST','START_FAILED'] | |||
| // if (job.textContent.trim() == 'IMAGE_FAILED' || job.textContent.trim() == 'SUBMIT_FAILED' || job.textContent.trim() == 'DELETE_FAILED' | |||
| // || job.textContent.trim() == 'KILLED' || job.textContent.trim() == 'COMPLETED' || job.textContent.trim() == 'FAILED' | |||
| // || job.textContent.trim() == 'CANCELED' || job.textContent.trim() == 'LOST') { | |||
| // return | |||
| // } | |||
| let status = $(`#${versionname}-status-span`).text() | |||
| if(['IMAGE_FAILED','SUBMIT_FAILED','DELETE_FAILED','KILLED','COMPLETED','FAILED','CANCELED','LOST','START_FAILED'].includes(status)){ | |||
| return | |||
| } | |||
| let stopArray=["KILLED","FAILED","START_FAILED","KILLING","COMPLETED"] | |||
| $.get(`/api/v1/repos/${repoPath}/modelarts/train-job/${jobID}?version_name=${versionname}`, (data) => { | |||
| @@ -663,7 +667,12 @@ td, th { | |||
| html += "</span>" | |||
| html += "</td>" | |||
| html += "<td class='message seven wide'>" | |||
| html += "<span class='truncate has-emoji'>"+ `${dirs_size}` + "</span>" | |||
| if(data.Dirs[i].IsDir){ | |||
| html += "<span class='truncate has-emoji'></span>" | |||
| }else{ | |||
| html += "<span class='truncate has-emoji'>"+ `${dirs_size}` + "</span>" | |||
| } | |||
| html += "</td>" | |||
| html += "<td class='text right age three wide'>" | |||
| @@ -141,7 +141,6 @@ | |||
| <script> | |||
| let repolink = {{.RepoLink}} | |||
| let repoId = {{$repository}} | |||
| let url_href = window.location.pathname.split('show_model')[0] + 'create_model' | |||
| const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config; | |||
| $('input[name="_csrf"]').val(csrf) | |||
| @@ -69,7 +69,7 @@ | |||
| <!-- <a href="javascript:window.history.back();"><i class="arrow left icon"></i>返回</a> --> | |||
| <div class="ui breadcrumb"> | |||
| <a class="section" href="{{$.RepoLink}}/modelmanage/show_model"> | |||
| 模型管理 | |||
| {{$.i18n.Tr "repo.model.manage.model_manage"}} | |||
| </a> | |||
| <div class="divider"> / </div> | |||
| <div class="active section">{{.name}}</div> | |||
| @@ -83,15 +83,15 @@ | |||
| <table class="tableStyle" style="margin-top:20px;"> | |||
| <tbody> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">模型名称</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.model.manage.model_name"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="ModelName" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">版本</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.model.manage.version"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="Version" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">标签</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.migrate_items_labels"}}</td> | |||
| <td class="ti-text-form-content"> | |||
| <div id="Label" style="overflow: hidden;width: 95%;"> | |||
| @@ -101,26 +101,59 @@ | |||
| </td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">大小</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.model_size"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="Size" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">创建时间</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.createtime"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="CreateTime" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">模型描述</td> | |||
| <td class="ti-text-form-content" ><div id="edit-td" style="display:flex"><span id="Description" title="" class="iword-elipsis"></span><i id="edit-pencil" data-id="" data-desc="" class="pencil alternate icon" style="cursor:pointer;vertical-align: top;" id="editor" onclick="editorFn(this)"></div></td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.model.manage.description"}}</td> | |||
| <td class="ti-text-form-content" > | |||
| <div id="edit-td" style="display:flex"> | |||
| <span id="Description" title="" class="iword-elipsis"></span> | |||
| <i id="edit-pencil" data-id="" data-desc="" class="pencil alternate icon" style="cursor:pointer;vertical-align: top;" id="editor" onclick="editorFn(this)"></i> | |||
| </div> | |||
| </td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.code_version"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="CodeBranch" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.train_job.start_file"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="BootFile" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.train_job.train_dataset"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="DatasetName" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.train_job.run_parameter"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="Parameters" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.train_job.AI_driver"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="EngineName" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.train_job.standard"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="FlavorName" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.modelarts.train_job.compute_node"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="WorkServerNumber" title=""></span></td> | |||
| </tr> | |||
| </tbody> | |||
| </table> | |||
| </div> | |||
| <div class="half-table"> | |||
| <span class="model_header_text">模型精度</span> | |||
| <span class="model_header_text">{{$.i18n.Tr "repo.model.manage.model_accuracy"}}</span> | |||
| <table class="tableStyle" style="margin-top:20px;"> | |||
| <tbody> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">准确率</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.model.manage.Accuracy"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="Accuracy" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| @@ -128,11 +161,11 @@ | |||
| <td class="ti-text-form-content word-elipsis"><span id="F1" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">精确率</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.model.manage.Precision"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="Precision" title=""></span></td> | |||
| </tr> | |||
| <tr> | |||
| <td class="ti-text-form-label text-width80">召回率</td> | |||
| <td class="ti-text-form-label text-width80">{{$.i18n.Tr "repo.model.manage.Recall"}}</td> | |||
| <td class="ti-text-form-content word-elipsis"><span id="Recall" title=""></span></td> | |||
| </tr> | |||
| </tbody> | |||
| @@ -155,6 +188,7 @@ function changeInfo(version){ | |||
| let returnArray = [] | |||
| returnArray = transObj(versionData) | |||
| let [initObj,initModelAcc,id] = returnArray | |||
| editorCancel('','') | |||
| renderInfo(initObj,initModelAcc,id) | |||
| }) | |||
| } | |||
| @@ -175,8 +209,13 @@ function loadInfo(){ | |||
| }) | |||
| } | |||
| function transObj(data){ | |||
| let {ID,Name,Version,Label,Size,Description,CreatedUnix,Accuracy} = data[0] | |||
| let {ID,Name,Version,Label,Size,Description,CreatedUnix,Accuracy,CodeBranch,CodeCommitID,TrainTaskInfo} = data[0] | |||
| let modelAcc = JSON.parse(Accuracy) | |||
| TrainTaskInfo = JSON.parse(TrainTaskInfo) | |||
| // Parameters = JSON.parse(Parameters) | |||
| let {Parameters,EngineName} = TrainTaskInfo | |||
| Parameters = JSON.parse(Parameters) | |||
| Parameters = Parameters.parameter.length === 0 ? '--':Parameters.parameter | |||
| let size = tranSize(Size) | |||
| let time = transTime(CreatedUnix) | |||
| let initObj = { | |||
| @@ -186,6 +225,15 @@ function transObj(data){ | |||
| Size:size, | |||
| CreateTime:time, | |||
| Description:Description || '--', | |||
| CodeBranch:CodeBranch || '--', | |||
| CodeCommitID:CodeCommitID || '--', | |||
| BootFile:TrainTaskInfo.BootFile || '--', | |||
| DatasetName:TrainTaskInfo.DatasetName || '--', | |||
| Parameters:TrainTaskInfo.Parameters || '--', | |||
| FlavorName:TrainTaskInfo.FlavorName || '--', | |||
| WorkServerNumber:TrainTaskInfo.WorkServerNumber || '--', | |||
| Parameters:Parameters, | |||
| EngineName:EngineName, | |||
| } | |||
| let initModelAcc = { | |||
| Accuracy: modelAcc.Accuracy || '--', | |||
| @@ -221,15 +269,16 @@ function tranSize(value){ | |||
| function editorFn(context){ | |||
| let id= context.dataset.id | |||
| let text = context.dataset.desc | |||
| console.log(id,text) | |||
| $('#edit-td').replaceWith("<div id='edit-div' style='width:80%;display: inline-block;'><textarea id='textarea-value' value='' rows='3' maxlength='255' style='width:80%;' id='edit-text'>"+text+"</textarea><i class='check icon' style='color: #50d4ab;' onclick='editorSure(\"" + text + "\",\"" + id + "\")'></i><i class='times icon' style='color: #f66f6a;' onclick='editorCancel(\"" + text + "\",\"" + id + "\")'></i></div>"); | |||
| let textValue = text.replace(/enter;/g,'\r\n') | |||
| $('#edit-td').replaceWith(`<div id='edit-div' style='width:80%;display: inline-block;'><textarea id='textarea-value' value='' rows='3' maxlength='255' style='width:80%;white-space: nowrap;' id='edit-text'>${textValue}</textarea><i class='check icon' style='color: #50d4ab;' onclick='editorSure("${text}","${id}")'></i><i class='times icon' style='color: #f66f6a;' onclick='editorCancel("${text}","${id}")'></i></div>`); | |||
| } | |||
| function editorCancel(text,id){ | |||
| $('#edit-div').replaceWith(`<div id="edit-td" style="display:flex;"><span id="Description" title="${text}" class="iword-elipsis">${text}</span><i id="edit-pencil" data-id="${id}" data-desc="${text}" class="pencil alternate icon" style="cursor:pointer;vertical-align: top;" id="editor" onclick="editorFn(this)"></div>`) | |||
| let objkey = text.replace(/enter;/g,'\r\n') | |||
| $('#edit-div').replaceWith(`<div id="edit-td" style="display:flex;"><span id="Description" title="${objkey}" class="iword-elipsis">${objkey}</span><i id="edit-pencil" data-id="${id}" data-desc="${text}" class="pencil alternate icon" style="cursor:pointer;vertical-align: top;" id="editor" onclick="editorFn(this)"></div>`) | |||
| } | |||
| function editorSure(text,id){ | |||
| let description=$('#textarea-value').val() | |||
| let sourcetext = $('#textarea-value').val().replace(/\n/g,'enter;') | |||
| let data = { | |||
| ID:id, | |||
| Description:description | |||
| @@ -239,16 +288,18 @@ function editorSure(text,id){ | |||
| type:'PUT', | |||
| data:data | |||
| }).done((res)=>{ | |||
| $('#edit-div').replaceWith(`<div id="edit-td" style="display:flex;"><span id="Description" title="${description}" class="iword-elipsis">${description}</span><i id="edit-pencil" data-id="${id}" data-desc="${description}" class="pencil alternate icon" style="cursor:pointer;vertical-align: top;" id="editor" onclick="editorFn(this)"></div>`) | |||
| $('#edit-div').replaceWith(`<div id="edit-td" style="display:flex;"><span id="Description" title="${description}" class="iword-elipsis">${description}</span><i id="edit-pencil" data-id="${id}" data-desc="${sourcetext}" class="pencil alternate icon" style="cursor:pointer;vertical-align: top;" id="editor" onclick="editorFn(this)"></div>`) | |||
| }) | |||
| } | |||
| function renderInfo(obj,accObj,id){ | |||
| for(let key in obj){ | |||
| if(key==="Description"){ | |||
| let descriptionText=obj[key].replace(/\r\n|\n/g,'enter;') | |||
| $(`#${key}`).text(obj[key]) | |||
| $(`#${key}`).attr("title",obj[key]) | |||
| $('#edit-pencil').attr("data-id",id) | |||
| $('#edit-pencil').attr("data-desc",obj[key]) | |||
| $('#edit-pencil').attr("data-desc",descriptionText) | |||
| } | |||
| else if(key==="Label"){ | |||
| $('#Label').empty() | |||
| @@ -263,6 +314,27 @@ function renderInfo(obj,accObj,id){ | |||
| } | |||
| $('#Label').append(html) | |||
| } | |||
| } | |||
| else if(key==="CodeCommitID"){ | |||
| let codeCommit = obj[key].slice(0,10) | |||
| let html = `<a style="margin-left:1rem" class="ui label" title="${codeCommit}">${codeCommit}</a>` | |||
| $('#CodeBranch').append(html) | |||
| } | |||
| else if(key==="Parameters"){ | |||
| if(obj[key]==='--'){ | |||
| $(`#${key}`).text(obj[key]) | |||
| }else{ | |||
| const parameterArray = obj[key].map(element => { | |||
| let labelValue = `${element.label}=${element.value}` | |||
| return labelValue | |||
| }); | |||
| const parameter = parameterArray.join('; ') | |||
| $(`#${key}`).text(parameter) | |||
| $(`#${key}`).attr("title",parameter) | |||
| } | |||
| } | |||
| else{ | |||
| $(`#${key}`).text(obj[key]) | |||
| @@ -0,0 +1,25 @@ | |||
| # Compiled Object files, Static and Dynamic libs (Shared Objects) | |||
| *.o | |||
| *.a | |||
| *.so | |||
| # Folders | |||
| _obj | |||
| _test | |||
| # Architecture specific extensions/prefixes | |||
| *.[568vq] | |||
| [568vq].out | |||
| *.cgo1.go | |||
| *.cgo2.c | |||
| _cgo_defun.c | |||
| _cgo_gotypes.go | |||
| _cgo_export.* | |||
| _testmain.go | |||
| *.exe | |||
| .idea/ | |||
| *.iml | |||
| @@ -0,0 +1,19 @@ | |||
| language: go | |||
| sudo: false | |||
| matrix: | |||
| include: | |||
| - go: 1.7.x | |||
| - go: 1.8.x | |||
| - go: 1.9.x | |||
| - go: 1.10.x | |||
| - go: 1.11.x | |||
| - go: tip | |||
| allow_failures: | |||
| - go: tip | |||
| script: | |||
| - go get -t -v ./... | |||
| - diff -u <(echo -n) <(gofmt -d .) | |||
| - go vet $(go list ./... | grep -v /vendor/) | |||
| - go test -v -race ./... | |||
| @@ -0,0 +1,9 @@ | |||
| # This is the official list of Gorilla WebSocket authors for copyright | |||
| # purposes. | |||
| # | |||
| # Please keep the list sorted. | |||
| Gary Burd <gary@beagledreams.com> | |||
| Google LLC (https://opensource.google.com/) | |||
| Joachim Bauch <mail@joachim-bauch.de> | |||
| @@ -0,0 +1,22 @@ | |||
| Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are met: | |||
| Redistributions of source code must retain the above copyright notice, this | |||
| list of conditions and the following disclaimer. | |||
| Redistributions in binary form must reproduce the above copyright notice, | |||
| this list of conditions and the following disclaimer in the documentation | |||
| and/or other materials provided with the distribution. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND | |||
| ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |||
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
| DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | |||
| FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
| DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |||
| SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |||
| CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |||
| OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,64 @@ | |||
| # Gorilla WebSocket | |||
| Gorilla WebSocket is a [Go](http://golang.org/) implementation of the | |||
| [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. | |||
| [](https://travis-ci.org/gorilla/websocket) | |||
| [](https://godoc.org/github.com/gorilla/websocket) | |||
| ### Documentation | |||
| * [API Reference](http://godoc.org/github.com/gorilla/websocket) | |||
| * [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) | |||
| * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) | |||
| * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) | |||
| * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) | |||
| ### Status | |||
| The Gorilla WebSocket package provides a complete and tested implementation of | |||
| the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The | |||
| package API is stable. | |||
| ### Installation | |||
| go get github.com/gorilla/websocket | |||
| ### Protocol Compliance | |||
| The Gorilla WebSocket package passes the server tests in the [Autobahn Test | |||
| Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn | |||
| subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). | |||
| ### Gorilla WebSocket compared with other packages | |||
| <table> | |||
| <tr> | |||
| <th></th> | |||
| <th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th> | |||
| <th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th> | |||
| </tr> | |||
| <tr> | |||
| <tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr> | |||
| <tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr> | |||
| <tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr> | |||
| <tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr> | |||
| <tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr> | |||
| <tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr> | |||
| <tr><td colspan="3">Other Features</tr></td> | |||
| <tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr> | |||
| <tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr> | |||
| <tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr> | |||
| </table> | |||
| Notes: | |||
| 1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). | |||
| 2. The application can get the type of a received data message by implementing | |||
| a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) | |||
| function. | |||
| 3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. | |||
| Read returns when the input buffer is full or a frame boundary is | |||
| encountered. Each call to Write sends a single frame message. The Gorilla | |||
| io.Reader and io.WriteCloser operate on a single WebSocket message. | |||
| @@ -0,0 +1,395 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "bytes" | |||
| "context" | |||
| "crypto/tls" | |||
| "errors" | |||
| "io" | |||
| "io/ioutil" | |||
| "net" | |||
| "net/http" | |||
| "net/http/httptrace" | |||
| "net/url" | |||
| "strings" | |||
| "time" | |||
| ) | |||
| // ErrBadHandshake is returned when the server response to opening handshake is | |||
| // invalid. | |||
| var ErrBadHandshake = errors.New("websocket: bad handshake") | |||
| var errInvalidCompression = errors.New("websocket: invalid compression negotiation") | |||
| // NewClient creates a new client connection using the given net connection. | |||
| // The URL u specifies the host and request URI. Use requestHeader to specify | |||
| // the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies | |||
| // (Cookie). Use the response.Header to get the selected subprotocol | |||
| // (Sec-WebSocket-Protocol) and cookies (Set-Cookie). | |||
| // | |||
| // If the WebSocket handshake fails, ErrBadHandshake is returned along with a | |||
| // non-nil *http.Response so that callers can handle redirects, authentication, | |||
| // etc. | |||
| // | |||
| // Deprecated: Use Dialer instead. | |||
| func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { | |||
| d := Dialer{ | |||
| ReadBufferSize: readBufSize, | |||
| WriteBufferSize: writeBufSize, | |||
| NetDial: func(net, addr string) (net.Conn, error) { | |||
| return netConn, nil | |||
| }, | |||
| } | |||
| return d.Dial(u.String(), requestHeader) | |||
| } | |||
| // A Dialer contains options for connecting to WebSocket server. | |||
| type Dialer struct { | |||
| // NetDial specifies the dial function for creating TCP connections. If | |||
| // NetDial is nil, net.Dial is used. | |||
| NetDial func(network, addr string) (net.Conn, error) | |||
| // NetDialContext specifies the dial function for creating TCP connections. If | |||
| // NetDialContext is nil, net.DialContext is used. | |||
| NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) | |||
| // Proxy specifies a function to return a proxy for a given | |||
| // Request. If the function returns a non-nil error, the | |||
| // request is aborted with the provided error. | |||
| // If Proxy is nil or returns a nil *URL, no proxy is used. | |||
| Proxy func(*http.Request) (*url.URL, error) | |||
| // TLSClientConfig specifies the TLS configuration to use with tls.Client. | |||
| // If nil, the default configuration is used. | |||
| TLSClientConfig *tls.Config | |||
| // HandshakeTimeout specifies the duration for the handshake to complete. | |||
| HandshakeTimeout time.Duration | |||
| // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer | |||
| // size is zero, then a useful default size is used. The I/O buffer sizes | |||
| // do not limit the size of the messages that can be sent or received. | |||
| ReadBufferSize, WriteBufferSize int | |||
| // WriteBufferPool is a pool of buffers for write operations. If the value | |||
| // is not set, then write buffers are allocated to the connection for the | |||
| // lifetime of the connection. | |||
| // | |||
| // A pool is most useful when the application has a modest volume of writes | |||
| // across a large number of connections. | |||
| // | |||
| // Applications should use a single pool for each unique value of | |||
| // WriteBufferSize. | |||
| WriteBufferPool BufferPool | |||
| // Subprotocols specifies the client's requested subprotocols. | |||
| Subprotocols []string | |||
| // EnableCompression specifies if the client should attempt to negotiate | |||
| // per message compression (RFC 7692). Setting this value to true does not | |||
| // guarantee that compression will be supported. Currently only "no context | |||
| // takeover" modes are supported. | |||
| EnableCompression bool | |||
| // Jar specifies the cookie jar. | |||
| // If Jar is nil, cookies are not sent in requests and ignored | |||
| // in responses. | |||
| Jar http.CookieJar | |||
| } | |||
| // Dial creates a new client connection by calling DialContext with a background context. | |||
| func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { | |||
| return d.DialContext(context.Background(), urlStr, requestHeader) | |||
| } | |||
| var errMalformedURL = errors.New("malformed ws or wss URL") | |||
| func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { | |||
| hostPort = u.Host | |||
| hostNoPort = u.Host | |||
| if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { | |||
| hostNoPort = hostNoPort[:i] | |||
| } else { | |||
| switch u.Scheme { | |||
| case "wss": | |||
| hostPort += ":443" | |||
| case "https": | |||
| hostPort += ":443" | |||
| default: | |||
| hostPort += ":80" | |||
| } | |||
| } | |||
| return hostPort, hostNoPort | |||
| } | |||
| // DefaultDialer is a dialer with all fields set to the default values. | |||
| var DefaultDialer = &Dialer{ | |||
| Proxy: http.ProxyFromEnvironment, | |||
| HandshakeTimeout: 45 * time.Second, | |||
| } | |||
| // nilDialer is dialer to use when receiver is nil. | |||
| var nilDialer = *DefaultDialer | |||
| // DialContext creates a new client connection. Use requestHeader to specify the | |||
| // origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). | |||
| // Use the response.Header to get the selected subprotocol | |||
| // (Sec-WebSocket-Protocol) and cookies (Set-Cookie). | |||
| // | |||
| // The context will be used in the request and in the Dialer | |||
| // | |||
| // If the WebSocket handshake fails, ErrBadHandshake is returned along with a | |||
| // non-nil *http.Response so that callers can handle redirects, authentication, | |||
| // etcetera. The response body may not contain the entire response and does not | |||
| // need to be closed by the application. | |||
| func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { | |||
| if d == nil { | |||
| d = &nilDialer | |||
| } | |||
| challengeKey, err := generateChallengeKey() | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| u, err := url.Parse(urlStr) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| switch u.Scheme { | |||
| case "ws": | |||
| u.Scheme = "http" | |||
| case "wss": | |||
| u.Scheme = "https" | |||
| default: | |||
| return nil, nil, errMalformedURL | |||
| } | |||
| if u.User != nil { | |||
| // User name and password are not allowed in websocket URIs. | |||
| return nil, nil, errMalformedURL | |||
| } | |||
| req := &http.Request{ | |||
| Method: "GET", | |||
| URL: u, | |||
| Proto: "HTTP/1.1", | |||
| ProtoMajor: 1, | |||
| ProtoMinor: 1, | |||
| Header: make(http.Header), | |||
| Host: u.Host, | |||
| } | |||
| req = req.WithContext(ctx) | |||
| // Set the cookies present in the cookie jar of the dialer | |||
| if d.Jar != nil { | |||
| for _, cookie := range d.Jar.Cookies(u) { | |||
| req.AddCookie(cookie) | |||
| } | |||
| } | |||
| // Set the request headers using the capitalization for names and values in | |||
| // RFC examples. Although the capitalization shouldn't matter, there are | |||
| // servers that depend on it. The Header.Set method is not used because the | |||
| // method canonicalizes the header names. | |||
| req.Header["Upgrade"] = []string{"websocket"} | |||
| req.Header["Connection"] = []string{"Upgrade"} | |||
| req.Header["Sec-WebSocket-Key"] = []string{challengeKey} | |||
| req.Header["Sec-WebSocket-Version"] = []string{"13"} | |||
| if len(d.Subprotocols) > 0 { | |||
| req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} | |||
| } | |||
| for k, vs := range requestHeader { | |||
| switch { | |||
| case k == "Host": | |||
| if len(vs) > 0 { | |||
| req.Host = vs[0] | |||
| } | |||
| case k == "Upgrade" || | |||
| k == "Connection" || | |||
| k == "Sec-Websocket-Key" || | |||
| k == "Sec-Websocket-Version" || | |||
| k == "Sec-Websocket-Extensions" || | |||
| (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): | |||
| return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) | |||
| case k == "Sec-Websocket-Protocol": | |||
| req.Header["Sec-WebSocket-Protocol"] = vs | |||
| default: | |||
| req.Header[k] = vs | |||
| } | |||
| } | |||
| if d.EnableCompression { | |||
| req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} | |||
| } | |||
| if d.HandshakeTimeout != 0 { | |||
| var cancel func() | |||
| ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) | |||
| defer cancel() | |||
| } | |||
| // Get network dial function. | |||
| var netDial func(network, add string) (net.Conn, error) | |||
| if d.NetDialContext != nil { | |||
| netDial = func(network, addr string) (net.Conn, error) { | |||
| return d.NetDialContext(ctx, network, addr) | |||
| } | |||
| } else if d.NetDial != nil { | |||
| netDial = d.NetDial | |||
| } else { | |||
| netDialer := &net.Dialer{} | |||
| netDial = func(network, addr string) (net.Conn, error) { | |||
| return netDialer.DialContext(ctx, network, addr) | |||
| } | |||
| } | |||
| // If needed, wrap the dial function to set the connection deadline. | |||
| if deadline, ok := ctx.Deadline(); ok { | |||
| forwardDial := netDial | |||
| netDial = func(network, addr string) (net.Conn, error) { | |||
| c, err := forwardDial(network, addr) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = c.SetDeadline(deadline) | |||
| if err != nil { | |||
| c.Close() | |||
| return nil, err | |||
| } | |||
| return c, nil | |||
| } | |||
| } | |||
| // If needed, wrap the dial function to connect through a proxy. | |||
| if d.Proxy != nil { | |||
| proxyURL, err := d.Proxy(req) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if proxyURL != nil { | |||
| dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| netDial = dialer.Dial | |||
| } | |||
| } | |||
| hostPort, hostNoPort := hostPortNoPort(u) | |||
| trace := httptrace.ContextClientTrace(ctx) | |||
| if trace != nil && trace.GetConn != nil { | |||
| trace.GetConn(hostPort) | |||
| } | |||
| netConn, err := netDial("tcp", hostPort) | |||
| if trace != nil && trace.GotConn != nil { | |||
| trace.GotConn(httptrace.GotConnInfo{ | |||
| Conn: netConn, | |||
| }) | |||
| } | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| defer func() { | |||
| if netConn != nil { | |||
| netConn.Close() | |||
| } | |||
| }() | |||
| if u.Scheme == "https" { | |||
| cfg := cloneTLSConfig(d.TLSClientConfig) | |||
| if cfg.ServerName == "" { | |||
| cfg.ServerName = hostNoPort | |||
| } | |||
| tlsConn := tls.Client(netConn, cfg) | |||
| netConn = tlsConn | |||
| var err error | |||
| if trace != nil { | |||
| err = doHandshakeWithTrace(trace, tlsConn, cfg) | |||
| } else { | |||
| err = doHandshake(tlsConn, cfg) | |||
| } | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| } | |||
| conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) | |||
| if err := req.Write(netConn); err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if trace != nil && trace.GotFirstResponseByte != nil { | |||
| if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { | |||
| trace.GotFirstResponseByte() | |||
| } | |||
| } | |||
| resp, err := http.ReadResponse(conn.br, req) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if d.Jar != nil { | |||
| if rc := resp.Cookies(); len(rc) > 0 { | |||
| d.Jar.SetCookies(u, rc) | |||
| } | |||
| } | |||
| if resp.StatusCode != 101 || | |||
| !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || | |||
| !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || | |||
| resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { | |||
| // Before closing the network connection on return from this | |||
| // function, slurp up some of the response to aid application | |||
| // debugging. | |||
| buf := make([]byte, 1024) | |||
| n, _ := io.ReadFull(resp.Body, buf) | |||
| resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) | |||
| return nil, resp, ErrBadHandshake | |||
| } | |||
| for _, ext := range parseExtensions(resp.Header) { | |||
| if ext[""] != "permessage-deflate" { | |||
| continue | |||
| } | |||
| _, snct := ext["server_no_context_takeover"] | |||
| _, cnct := ext["client_no_context_takeover"] | |||
| if !snct || !cnct { | |||
| return nil, resp, errInvalidCompression | |||
| } | |||
| conn.newCompressionWriter = compressNoContextTakeover | |||
| conn.newDecompressionReader = decompressNoContextTakeover | |||
| break | |||
| } | |||
| resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) | |||
| conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") | |||
| netConn.SetDeadline(time.Time{}) | |||
| netConn = nil // to avoid close in defer. | |||
| return conn, resp, nil | |||
| } | |||
| func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { | |||
| if err := tlsConn.Handshake(); err != nil { | |||
| return err | |||
| } | |||
| if !cfg.InsecureSkipVerify { | |||
| if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,16 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build go1.8 | |||
| package websocket | |||
| import "crypto/tls" | |||
| func cloneTLSConfig(cfg *tls.Config) *tls.Config { | |||
| if cfg == nil { | |||
| return &tls.Config{} | |||
| } | |||
| return cfg.Clone() | |||
| } | |||
| @@ -0,0 +1,38 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build !go1.8 | |||
| package websocket | |||
| import "crypto/tls" | |||
| // cloneTLSConfig clones all public fields except the fields | |||
| // SessionTicketsDisabled and SessionTicketKey. This avoids copying the | |||
| // sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a | |||
| // config in active use. | |||
| func cloneTLSConfig(cfg *tls.Config) *tls.Config { | |||
| if cfg == nil { | |||
| return &tls.Config{} | |||
| } | |||
| return &tls.Config{ | |||
| Rand: cfg.Rand, | |||
| Time: cfg.Time, | |||
| Certificates: cfg.Certificates, | |||
| NameToCertificate: cfg.NameToCertificate, | |||
| GetCertificate: cfg.GetCertificate, | |||
| RootCAs: cfg.RootCAs, | |||
| NextProtos: cfg.NextProtos, | |||
| ServerName: cfg.ServerName, | |||
| ClientAuth: cfg.ClientAuth, | |||
| ClientCAs: cfg.ClientCAs, | |||
| InsecureSkipVerify: cfg.InsecureSkipVerify, | |||
| CipherSuites: cfg.CipherSuites, | |||
| PreferServerCipherSuites: cfg.PreferServerCipherSuites, | |||
| ClientSessionCache: cfg.ClientSessionCache, | |||
| MinVersion: cfg.MinVersion, | |||
| MaxVersion: cfg.MaxVersion, | |||
| CurvePreferences: cfg.CurvePreferences, | |||
| } | |||
| } | |||
| @@ -0,0 +1,148 @@ | |||
| // Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "compress/flate" | |||
| "errors" | |||
| "io" | |||
| "strings" | |||
| "sync" | |||
| ) | |||
| const ( | |||
| minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 | |||
| maxCompressionLevel = flate.BestCompression | |||
| defaultCompressionLevel = 1 | |||
| ) | |||
| var ( | |||
| flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool | |||
| flateReaderPool = sync.Pool{New: func() interface{} { | |||
| return flate.NewReader(nil) | |||
| }} | |||
| ) | |||
| func decompressNoContextTakeover(r io.Reader) io.ReadCloser { | |||
| const tail = | |||
| // Add four bytes as specified in RFC | |||
| "\x00\x00\xff\xff" + | |||
| // Add final block to squelch unexpected EOF error from flate reader. | |||
| "\x01\x00\x00\xff\xff" | |||
| fr, _ := flateReaderPool.Get().(io.ReadCloser) | |||
| fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) | |||
| return &flateReadWrapper{fr} | |||
| } | |||
| func isValidCompressionLevel(level int) bool { | |||
| return minCompressionLevel <= level && level <= maxCompressionLevel | |||
| } | |||
| func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { | |||
| p := &flateWriterPools[level-minCompressionLevel] | |||
| tw := &truncWriter{w: w} | |||
| fw, _ := p.Get().(*flate.Writer) | |||
| if fw == nil { | |||
| fw, _ = flate.NewWriter(tw, level) | |||
| } else { | |||
| fw.Reset(tw) | |||
| } | |||
| return &flateWriteWrapper{fw: fw, tw: tw, p: p} | |||
| } | |||
| // truncWriter is an io.Writer that writes all but the last four bytes of the | |||
| // stream to another io.Writer. | |||
| type truncWriter struct { | |||
| w io.WriteCloser | |||
| n int | |||
| p [4]byte | |||
| } | |||
| func (w *truncWriter) Write(p []byte) (int, error) { | |||
| n := 0 | |||
| // fill buffer first for simplicity. | |||
| if w.n < len(w.p) { | |||
| n = copy(w.p[w.n:], p) | |||
| p = p[n:] | |||
| w.n += n | |||
| if len(p) == 0 { | |||
| return n, nil | |||
| } | |||
| } | |||
| m := len(p) | |||
| if m > len(w.p) { | |||
| m = len(w.p) | |||
| } | |||
| if nn, err := w.w.Write(w.p[:m]); err != nil { | |||
| return n + nn, err | |||
| } | |||
| copy(w.p[:], w.p[m:]) | |||
| copy(w.p[len(w.p)-m:], p[len(p)-m:]) | |||
| nn, err := w.w.Write(p[:len(p)-m]) | |||
| return n + nn, err | |||
| } | |||
| type flateWriteWrapper struct { | |||
| fw *flate.Writer | |||
| tw *truncWriter | |||
| p *sync.Pool | |||
| } | |||
| func (w *flateWriteWrapper) Write(p []byte) (int, error) { | |||
| if w.fw == nil { | |||
| return 0, errWriteClosed | |||
| } | |||
| return w.fw.Write(p) | |||
| } | |||
| func (w *flateWriteWrapper) Close() error { | |||
| if w.fw == nil { | |||
| return errWriteClosed | |||
| } | |||
| err1 := w.fw.Flush() | |||
| w.p.Put(w.fw) | |||
| w.fw = nil | |||
| if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { | |||
| return errors.New("websocket: internal error, unexpected bytes at end of flate stream") | |||
| } | |||
| err2 := w.tw.w.Close() | |||
| if err1 != nil { | |||
| return err1 | |||
| } | |||
| return err2 | |||
| } | |||
| type flateReadWrapper struct { | |||
| fr io.ReadCloser | |||
| } | |||
| func (r *flateReadWrapper) Read(p []byte) (int, error) { | |||
| if r.fr == nil { | |||
| return 0, io.ErrClosedPipe | |||
| } | |||
| n, err := r.fr.Read(p) | |||
| if err == io.EOF { | |||
| // Preemptively place the reader back in the pool. This helps with | |||
| // scenarios where the application does not call NextReader() soon after | |||
| // this final read. | |||
| r.Close() | |||
| } | |||
| return n, err | |||
| } | |||
| func (r *flateReadWrapper) Close() error { | |||
| if r.fr == nil { | |||
| return io.ErrClosedPipe | |||
| } | |||
| err := r.fr.Close() | |||
| flateReaderPool.Put(r.fr) | |||
| r.fr = nil | |||
| return err | |||
| } | |||
| @@ -0,0 +1,15 @@ | |||
| // Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build go1.8 | |||
| package websocket | |||
| import "net" | |||
| func (c *Conn) writeBufs(bufs ...[]byte) error { | |||
| b := net.Buffers(bufs) | |||
| _, err := b.WriteTo(c.conn) | |||
| return err | |||
| } | |||
| @@ -0,0 +1,18 @@ | |||
| // Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build !go1.8 | |||
| package websocket | |||
| func (c *Conn) writeBufs(bufs ...[]byte) error { | |||
| for _, buf := range bufs { | |||
| if len(buf) > 0 { | |||
| if _, err := c.conn.Write(buf); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,180 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package websocket implements the WebSocket protocol defined in RFC 6455. | |||
| // | |||
| // Overview | |||
| // | |||
| // The Conn type represents a WebSocket connection. A server application calls | |||
| // the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: | |||
| // | |||
| // var upgrader = websocket.Upgrader{ | |||
| // ReadBufferSize: 1024, | |||
| // WriteBufferSize: 1024, | |||
| // } | |||
| // | |||
| // func handler(w http.ResponseWriter, r *http.Request) { | |||
| // conn, err := upgrader.Upgrade(w, r, nil) | |||
| // if err != nil { | |||
| // log.Println(err) | |||
| // return | |||
| // } | |||
| // ... Use conn to send and receive messages. | |||
| // } | |||
| // | |||
| // Call the connection's WriteMessage and ReadMessage methods to send and | |||
| // receive messages as a slice of bytes. This snippet of code shows how to echo | |||
| // messages using these methods: | |||
| // | |||
| // for { | |||
| // messageType, p, err := conn.ReadMessage() | |||
| // if err != nil { | |||
| // log.Println(err) | |||
| // return | |||
| // } | |||
| // if err := conn.WriteMessage(messageType, p); err != nil { | |||
| // log.Println(err) | |||
| // return | |||
| // } | |||
| // } | |||
| // | |||
| // In above snippet of code, p is a []byte and messageType is an int with value | |||
| // websocket.BinaryMessage or websocket.TextMessage. | |||
| // | |||
| // An application can also send and receive messages using the io.WriteCloser | |||
| // and io.Reader interfaces. To send a message, call the connection NextWriter | |||
| // method to get an io.WriteCloser, write the message to the writer and close | |||
| // the writer when done. To receive a message, call the connection NextReader | |||
| // method to get an io.Reader and read until io.EOF is returned. This snippet | |||
| // shows how to echo messages using the NextWriter and NextReader methods: | |||
| // | |||
| // for { | |||
| // messageType, r, err := conn.NextReader() | |||
| // if err != nil { | |||
| // return | |||
| // } | |||
| // w, err := conn.NextWriter(messageType) | |||
| // if err != nil { | |||
| // return err | |||
| // } | |||
| // if _, err := io.Copy(w, r); err != nil { | |||
| // return err | |||
| // } | |||
| // if err := w.Close(); err != nil { | |||
| // return err | |||
| // } | |||
| // } | |||
| // | |||
| // Data Messages | |||
| // | |||
| // The WebSocket protocol distinguishes between text and binary data messages. | |||
| // Text messages are interpreted as UTF-8 encoded text. The interpretation of | |||
| // binary messages is left to the application. | |||
| // | |||
| // This package uses the TextMessage and BinaryMessage integer constants to | |||
| // identify the two data message types. The ReadMessage and NextReader methods | |||
| // return the type of the received message. The messageType argument to the | |||
| // WriteMessage and NextWriter methods specifies the type of a sent message. | |||
| // | |||
| // It is the application's responsibility to ensure that text messages are | |||
| // valid UTF-8 encoded text. | |||
| // | |||
| // Control Messages | |||
| // | |||
| // The WebSocket protocol defines three types of control messages: close, ping | |||
| // and pong. Call the connection WriteControl, WriteMessage or NextWriter | |||
| // methods to send a control message to the peer. | |||
| // | |||
| // Connections handle received close messages by calling the handler function | |||
| // set with the SetCloseHandler method and by returning a *CloseError from the | |||
| // NextReader, ReadMessage or the message Read method. The default close | |||
| // handler sends a close message to the peer. | |||
| // | |||
| // Connections handle received ping messages by calling the handler function | |||
| // set with the SetPingHandler method. The default ping handler sends a pong | |||
| // message to the peer. | |||
| // | |||
| // Connections handle received pong messages by calling the handler function | |||
| // set with the SetPongHandler method. The default pong handler does nothing. | |||
| // If an application sends ping messages, then the application should set a | |||
| // pong handler to receive the corresponding pong. | |||
| // | |||
| // The control message handler functions are called from the NextReader, | |||
| // ReadMessage and message reader Read methods. The default close and ping | |||
| // handlers can block these methods for a short time when the handler writes to | |||
| // the connection. | |||
| // | |||
| // The application must read the connection to process close, ping and pong | |||
| // messages sent from the peer. If the application is not otherwise interested | |||
| // in messages from the peer, then the application should start a goroutine to | |||
| // read and discard messages from the peer. A simple example is: | |||
| // | |||
| // func readLoop(c *websocket.Conn) { | |||
| // for { | |||
| // if _, _, err := c.NextReader(); err != nil { | |||
| // c.Close() | |||
| // break | |||
| // } | |||
| // } | |||
| // } | |||
| // | |||
| // Concurrency | |||
| // | |||
| // Connections support one concurrent reader and one concurrent writer. | |||
| // | |||
| // Applications are responsible for ensuring that no more than one goroutine | |||
| // calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, | |||
| // WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and | |||
| // that no more than one goroutine calls the read methods (NextReader, | |||
| // SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) | |||
| // concurrently. | |||
| // | |||
| // The Close and WriteControl methods can be called concurrently with all other | |||
| // methods. | |||
| // | |||
| // Origin Considerations | |||
| // | |||
| // Web browsers allow Javascript applications to open a WebSocket connection to | |||
| // any host. It's up to the server to enforce an origin policy using the Origin | |||
| // request header sent by the browser. | |||
| // | |||
| // The Upgrader calls the function specified in the CheckOrigin field to check | |||
| // the origin. If the CheckOrigin function returns false, then the Upgrade | |||
| // method fails the WebSocket handshake with HTTP status 403. | |||
| // | |||
| // If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail | |||
| // the handshake if the Origin request header is present and the Origin host is | |||
| // not equal to the Host request header. | |||
| // | |||
| // The deprecated package-level Upgrade function does not perform origin | |||
| // checking. The application is responsible for checking the Origin header | |||
| // before calling the Upgrade function. | |||
| // | |||
| // Compression EXPERIMENTAL | |||
| // | |||
| // Per message compression extensions (RFC 7692) are experimentally supported | |||
| // by this package in a limited capacity. Setting the EnableCompression option | |||
| // to true in Dialer or Upgrader will attempt to negotiate per message deflate | |||
| // support. | |||
| // | |||
| // var upgrader = websocket.Upgrader{ | |||
| // EnableCompression: true, | |||
| // } | |||
| // | |||
| // If compression was successfully negotiated with the connection's peer, any | |||
| // message received in compressed form will be automatically decompressed. | |||
| // All Read methods will return uncompressed bytes. | |||
| // | |||
| // Per message compression of messages written to a connection can be enabled | |||
| // or disabled by calling the corresponding Conn method: | |||
| // | |||
| // conn.EnableWriteCompression(false) | |||
| // | |||
| // Currently this package does not support compression with "context takeover". | |||
| // This means that messages must be compressed and decompressed in isolation, | |||
| // without retaining sliding window or dictionary state across messages. For | |||
| // more details refer to RFC 7692. | |||
| // | |||
| // Use of compression is experimental and may result in decreased performance. | |||
| package websocket | |||
| @@ -0,0 +1,60 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "encoding/json" | |||
| "io" | |||
| ) | |||
// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead. This package-level function exists
// only for backward compatibility and simply delegates to the method.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}
| // WriteJSON writes the JSON encoding of v as a message. | |||
| // | |||
| // See the documentation for encoding/json Marshal for details about the | |||
| // conversion of Go values to JSON. | |||
| func (c *Conn) WriteJSON(v interface{}) error { | |||
| w, err := c.NextWriter(TextMessage) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err1 := json.NewEncoder(w).Encode(v) | |||
| err2 := w.Close() | |||
| if err1 != nil { | |||
| return err1 | |||
| } | |||
| return err2 | |||
| } | |||
// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead. This package-level function exists
// only for backward compatibility and simply delegates to the method.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}
| // ReadJSON reads the next JSON-encoded message from the connection and stores | |||
| // it in the value pointed to by v. | |||
| // | |||
| // See the documentation for the encoding/json Unmarshal function for details | |||
| // about the conversion of JSON to a Go value. | |||
| func (c *Conn) ReadJSON(v interface{}) error { | |||
| _, r, err := c.NextReader() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = json.NewDecoder(r).Decode(v) | |||
| if err == io.EOF { | |||
| // One value is expected in the message. | |||
| err = io.ErrUnexpectedEOF | |||
| } | |||
| return err | |||
| } | |||
| @@ -0,0 +1,54 @@ | |||
| // Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of | |||
| // this source code is governed by a BSD-style license that can be found in the | |||
| // LICENSE file. | |||
| // +build !appengine | |||
| package websocket | |||
| import "unsafe" | |||
// wordSize is the machine word size in bytes; maskBytes XORs whole words at
// a time where alignment allows.
const wordSize = int(unsafe.Sizeof(uintptr(0)))

// maskBytes XORs b in place with the 4-byte WebSocket masking key, starting
// at key position pos, and returns the key position for the byte following b
// (always in 0..3). This is the fast non-appengine implementation.
func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}
	// Mask one byte at a time to word boundary.
	// The leading bytes are handled individually so &b[0] is word-aligned for
	// the word loop below.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}
	// Create aligned word size key.
	// k repeats the 4-byte key rotated to the current position so a single
	// word XOR applies the correct key byte to each payload byte.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))
	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}
	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
| @@ -0,0 +1,15 @@ | |||
| // Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of | |||
| // this source code is governed by a BSD-style license that can be found in the | |||
| // LICENSE file. | |||
| // +build appengine | |||
| package websocket | |||
// maskBytes XORs b in place with the 4-byte WebSocket masking key, starting
// at key position pos, and returns the key position for the byte following b
// (always in 0..3). App Engine forbids unsafe, so this build masks one byte
// at a time.
func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := 0; i < len(b); i++ {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
| @@ -0,0 +1,102 @@ | |||
| // Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "bytes" | |||
| "net" | |||
| "sync" | |||
| "time" | |||
| ) | |||
// PreparedMessage caches on the wire representations of a message payload.
// Use PreparedMessage to efficiently send a message payload to multiple
// connections. PreparedMessage is especially useful when compression is used
// because the CPU and memory expensive compression operation can be executed
// once for a given set of compression options.
type PreparedMessage struct {
	messageType int    // TextMessage or BinaryMessage
	data        []byte // original payload (aliases the plain server frame)
	mu          sync.Mutex
	frames      map[prepareKey]*preparedFrame // cached frames, guarded by mu
}

// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
type prepareKey struct {
	isServer         bool // server frames are unmasked, client frames masked
	compress         bool
	compressionLevel int
}

// preparedFrame contains data in wire representation.
type preparedFrame struct {
	once sync.Once // guards the lazy, one-time rendering of data
	data []byte
}
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to connection using WritePreparedMessage method. Valid wire
// representation will be calculated lazily only once for a set of current
// connection options.
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
	pm := &PreparedMessage{
		messageType: messageType,
		frames:      make(map[prepareKey]*preparedFrame),
		data:        data,
	}

	// Prepare a plain server frame.
	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
	if err != nil {
		return nil, err
	}

	// To protect against caller modifying the data argument, remember the data
	// copied to the plain server frame. The payload is the tail of the frame
	// (after the header), so this slice aliases the cached frame's buffer.
	pm.data = frameData[len(frameData)-len(data):]
	return pm, nil
}
// frame returns the cached wire representation of the message for the given
// connection options, rendering it on first use. Safe for concurrent use:
// the map is guarded by pm.mu and rendering by the frame's sync.Once.
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
	pm.mu.Lock()
	frame, ok := pm.frames[key]
	if !ok {
		frame = &preparedFrame{}
		pm.frames[key] = frame
	}
	pm.mu.Unlock()

	var err error
	frame.once.Do(func() {
		// Prepare a frame using a 'fake' connection.
		// TODO: Refactor code in conn.go to allow more direct construction of
		// the frame.
		mu := make(chan bool, 1)
		mu <- true
		var nc prepareConn
		c := &Conn{
			conn:                   &nc,
			mu:                     mu,
			isServer:               key.isServer,
			compressionLevel:       key.compressionLevel,
			enableWriteCompression: true,
			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
		}
		if key.compress {
			c.newCompressionWriter = compressNoContextTakeover
		}
		err = c.WriteMessage(pm.messageType, pm.data)
		// nc captured everything the fake connection "sent"; that is the frame.
		frame.data = nc.buf.Bytes()
	})
	return pm.messageType, frame.data, err
}
// prepareConn is the 'fake' connection used to render a frame: it satisfies
// net.Conn via embedding but captures all writes into an in-memory buffer.
type prepareConn struct {
	buf bytes.Buffer
	net.Conn
}

// Write appends p to the capture buffer instead of sending it anywhere.
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }

// SetWriteDeadline is a no-op; in-memory frame rendering cannot time out.
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
| @@ -0,0 +1,77 @@ | |||
| // Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "bufio" | |||
| "encoding/base64" | |||
| "errors" | |||
| "net" | |||
| "net/http" | |||
| "net/url" | |||
| "strings" | |||
| ) | |||
// netDialerFunc adapts a plain dial function to the proxy_Dialer interface.
type netDialerFunc func(network, addr string) (net.Conn, error)

// Dial implements proxy_Dialer by invoking the function itself.
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
	return fn(network, addr)
}
// init registers an "http" scheme handler with the bundled x/net/proxy code
// so proxy URLs with an http scheme are dialed via HTTP CONNECT.
func init() {
	proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
		return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil
	})
}
// httpProxyDialer dials through an HTTP CONNECT proxy.
type httpProxyDialer struct {
	proxyURL *url.URL
	// fowardDial (sic — field name kept as upstream spells it) dials the
	// proxy server itself.
	fowardDial func(network, addr string) (net.Conn, error)
}
| func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { | |||
| hostPort, _ := hostPortNoPort(hpd.proxyURL) | |||
| conn, err := hpd.fowardDial(network, hostPort) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| connectHeader := make(http.Header) | |||
| if user := hpd.proxyURL.User; user != nil { | |||
| proxyUser := user.Username() | |||
| if proxyPassword, passwordSet := user.Password(); passwordSet { | |||
| credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) | |||
| connectHeader.Set("Proxy-Authorization", "Basic "+credential) | |||
| } | |||
| } | |||
| connectReq := &http.Request{ | |||
| Method: "CONNECT", | |||
| URL: &url.URL{Opaque: addr}, | |||
| Host: addr, | |||
| Header: connectHeader, | |||
| } | |||
| if err := connectReq.Write(conn); err != nil { | |||
| conn.Close() | |||
| return nil, err | |||
| } | |||
| // Read response. It's OK to use and discard buffered reader here becaue | |||
| // the remote server does not speak until spoken to. | |||
| br := bufio.NewReader(conn) | |||
| resp, err := http.ReadResponse(br, connectReq) | |||
| if err != nil { | |||
| conn.Close() | |||
| return nil, err | |||
| } | |||
| if resp.StatusCode != 200 { | |||
| conn.Close() | |||
| f := strings.SplitN(resp.Status, " ", 2) | |||
| return nil, errors.New(f[1]) | |||
| } | |||
| return conn, nil | |||
| } | |||
| @@ -0,0 +1,363 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "bufio" | |||
| "errors" | |||
| "io" | |||
| "net/http" | |||
| "net/url" | |||
| "strings" | |||
| "time" | |||
| ) | |||
// HandshakeError describes an error with the handshake from the peer.
type HandshakeError struct {
	message string
}

// Error implements the error interface.
func (e HandshakeError) Error() string { return e.message }
// Upgrader specifies parameters for upgrading an HTTP connection to a
// WebSocket connection. The zero value is usable; all fields are optional.
type Upgrader struct {
	// HandshakeTimeout specifies the duration for the handshake to complete.
	HandshakeTimeout time.Duration

	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
	// size is zero, then buffers allocated by the HTTP server are used. The
	// I/O buffer sizes do not limit the size of the messages that can be sent
	// or received.
	ReadBufferSize, WriteBufferSize int

	// WriteBufferPool is a pool of buffers for write operations. If the value
	// is not set, then write buffers are allocated to the connection for the
	// lifetime of the connection.
	//
	// A pool is most useful when the application has a modest volume of writes
	// across a large number of connections.
	//
	// Applications should use a single pool for each unique value of
	// WriteBufferSize.
	WriteBufferPool BufferPool

	// Subprotocols specifies the server's supported protocols in order of
	// preference. If this field is not nil, then the Upgrade method negotiates a
	// subprotocol by selecting the first match in this list with a protocol
	// requested by the client. If there's no match, then no protocol is
	// negotiated (the Sec-Websocket-Protocol header is not included in the
	// handshake response).
	Subprotocols []string

	// Error specifies the function for generating HTTP error responses. If Error
	// is nil, then http.Error is used to generate the HTTP response.
	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)

	// CheckOrigin returns true if the request Origin header is acceptable. If
	// CheckOrigin is nil, then a safe default is used: return false if the
	// Origin request header is present and the origin host is not equal to
	// request Host header.
	//
	// A CheckOrigin function should carefully validate the request origin to
	// prevent cross-site request forgery.
	CheckOrigin func(r *http.Request) bool

	// EnableCompression specifies if the server should attempt to negotiate per
	// message compression (RFC 7692). Setting this value to true does not
	// guarantee that compression will be supported. Currently only "no context
	// takeover" modes are supported.
	EnableCompression bool
}
| func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { | |||
| err := HandshakeError{reason} | |||
| if u.Error != nil { | |||
| u.Error(w, r, status, err) | |||
| } else { | |||
| w.Header().Set("Sec-Websocket-Version", "13") | |||
| http.Error(w, http.StatusText(status), status) | |||
| } | |||
| return nil, err | |||
| } | |||
| // checkSameOrigin returns true if the origin is not set or is equal to the request host. | |||
| func checkSameOrigin(r *http.Request) bool { | |||
| origin := r.Header["Origin"] | |||
| if len(origin) == 0 { | |||
| return true | |||
| } | |||
| u, err := url.Parse(origin[0]) | |||
| if err != nil { | |||
| return false | |||
| } | |||
| return equalASCIIFold(u.Host, r.Host) | |||
| } | |||
| func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { | |||
| if u.Subprotocols != nil { | |||
| clientProtocols := Subprotocols(r) | |||
| for _, serverProtocol := range u.Subprotocols { | |||
| for _, clientProtocol := range clientProtocols { | |||
| if clientProtocol == serverProtocol { | |||
| return clientProtocol | |||
| } | |||
| } | |||
| } | |||
| } else if responseHeader != nil { | |||
| return responseHeader.Get("Sec-Websocket-Protocol") | |||
| } | |||
| return "" | |||
| } | |||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// application negotiated subprotocol (Sec-WebSocket-Protocol).
//
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
	const badHandshake = "websocket: the client is not using the websocket protocol: "
	// Validate the client opening handshake per RFC 6455 section 4.2.1.
	if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
		return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
	}
	if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
		return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
	}
	if r.Method != "GET" {
		return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
	}
	if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
		return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
	}
	if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
		return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
	}
	checkOrigin := u.CheckOrigin
	if checkOrigin == nil {
		checkOrigin = checkSameOrigin
	}
	if !checkOrigin(r) {
		return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
	}
	challengeKey := r.Header.Get("Sec-Websocket-Key")
	if challengeKey == "" {
		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank")
	}
	subprotocol := u.selectSubprotocol(r, responseHeader)
	// Negotiate PMCE (per-message compression, RFC 7692); only
	// permessage-deflate is recognized.
	var compress bool
	if u.EnableCompression {
		for _, ext := range parseExtensions(r.Header) {
			if ext[""] != "permessage-deflate" {
				continue
			}
			compress = true
			break
		}
	}
	// Take over the TCP connection from the HTTP server.
	h, ok := w.(http.Hijacker)
	if !ok {
		return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
	}
	var brw *bufio.ReadWriter
	netConn, brw, err := h.Hijack()
	if err != nil {
		return u.returnError(w, r, http.StatusInternalServerError, err.Error())
	}
	if brw.Reader.Buffered() > 0 {
		netConn.Close()
		return nil, errors.New("websocket: client sent data before handshake is complete")
	}
	var br *bufio.Reader
	if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
		// Reuse hijacked buffered reader as connection reader.
		br = brw.Reader
	}
	buf := bufioWriterBuffer(netConn, brw.Writer)
	var writeBuf []byte
	if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
		// Reuse hijacked write buffer as connection buffer.
		writeBuf = buf
	}
	c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
	c.subprotocol = subprotocol
	if compress {
		c.newCompressionWriter = compressNoContextTakeover
		c.newDecompressionReader = decompressNoContextTakeover
	}
	// Use larger of hijacked buffer and connection write buffer for header.
	p := buf
	if len(c.writeBuf) > len(p) {
		p = c.writeBuf
	}
	p = p[:0]
	// Build the 101 Switching Protocols response by hand; the http package is
	// no longer in control of this connection.
	p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
	p = append(p, computeAcceptKey(challengeKey)...)
	p = append(p, "\r\n"...)
	if c.subprotocol != "" {
		p = append(p, "Sec-WebSocket-Protocol: "...)
		p = append(p, c.subprotocol...)
		p = append(p, "\r\n"...)
	}
	if compress {
		p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
	}
	for k, vs := range responseHeader {
		if k == "Sec-Websocket-Protocol" {
			// Already emitted above from the negotiated value.
			continue
		}
		for _, v := range vs {
			p = append(p, k...)
			p = append(p, ": "...)
			for i := 0; i < len(v); i++ {
				b := v[i]
				if b <= 31 {
					// prevent response splitting.
					b = ' '
				}
				p = append(p, b)
			}
			p = append(p, "\r\n"...)
		}
	}
	p = append(p, "\r\n"...)
	// Clear deadlines set by HTTP server.
	netConn.SetDeadline(time.Time{})
	if u.HandshakeTimeout > 0 {
		netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
	}
	if _, err = netConn.Write(p); err != nil {
		netConn.Close()
		return nil, err
	}
	if u.HandshakeTimeout > 0 {
		netConn.SetWriteDeadline(time.Time{})
	}
	return c, nil
}
| // Upgrade upgrades the HTTP server connection to the WebSocket protocol. | |||
| // | |||
| // Deprecated: Use websocket.Upgrader instead. | |||
| // | |||
| // Upgrade does not perform origin checking. The application is responsible for | |||
| // checking the Origin header before calling Upgrade. An example implementation | |||
| // of the same origin policy check is: | |||
| // | |||
| // if req.Header.Get("Origin") != "http://"+req.Host { | |||
| // http.Error(w, "Origin not allowed", http.StatusForbidden) | |||
| // return | |||
| // } | |||
| // | |||
| // If the endpoint supports subprotocols, then the application is responsible | |||
| // for negotiating the protocol used on the connection. Use the Subprotocols() | |||
| // function to get the subprotocols requested by the client. Use the | |||
| // Sec-Websocket-Protocol response header to specify the subprotocol selected | |||
| // by the application. | |||
| // | |||
| // The responseHeader is included in the response to the client's upgrade | |||
| // request. Use the responseHeader to specify cookies (Set-Cookie) and the | |||
| // negotiated subprotocol (Sec-Websocket-Protocol). | |||
| // | |||
| // The connection buffers IO to the underlying network connection. The | |||
| // readBufSize and writeBufSize parameters specify the size of the buffers to | |||
| // use. Messages can be larger than the buffers. | |||
| // | |||
| // If the request is not a valid WebSocket handshake, then Upgrade returns an | |||
| // error of type HandshakeError. Applications should handle this error by | |||
| // replying to the client with an HTTP error response. | |||
| func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { | |||
| u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} | |||
| u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { | |||
| // don't return errors to maintain backwards compatibility | |||
| } | |||
| u.CheckOrigin = func(r *http.Request) bool { | |||
| // allow all connections by default | |||
| return true | |||
| } | |||
| return u.Upgrade(w, r, responseHeader) | |||
| } | |||
// Subprotocols returns the subprotocols requested by the client in the
// Sec-Websocket-Protocol header, with surrounding whitespace trimmed.
// It returns nil when the header is absent or blank.
func Subprotocols(r *http.Request) []string {
	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
	if h == "" {
		return nil
	}
	parts := strings.Split(h, ",")
	protocols := make([]string, 0, len(parts))
	for _, p := range parts {
		protocols = append(protocols, strings.TrimSpace(p))
	}
	return protocols
}
// IsWebSocketUpgrade returns true if the client requested upgrade to the
// WebSocket protocol, i.e. the Connection header contains the "upgrade"
// token and the Upgrade header contains the "websocket" token.
func IsWebSocketUpgrade(r *http.Request) bool {
	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
		tokenListContainsValue(r.Header, "Upgrade", "websocket")
}
// bufioReaderSize returns the size of a bufio.Reader's internal buffer.
//
// Peek(0) on a freshly reset reader returns a zero-length slice backed by the
// reader's internal buffer, so its capacity is the buffer size.
// TODO: Use bufio.Reader.Size() after Go 1.10.
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
	br.Reset(originalReader)
	p, err := br.Peek(0)
	if err != nil {
		return 0
	}
	return cap(p)
}
// writeHook is an io.Writer that records the last slice passed to its Write
// method.
type writeHook struct {
	p []byte
}

// Write remembers p and reports it as fully written.
func (wh *writeHook) Write(p []byte) (int, error) {
	wh.p = p
	return len(p), nil
}

// bufioWriterBuffer grabs the internal buffer from a bufio.Writer.
//
// It relies on bufio.Writer passing buf[:1] to the underlying writer when a
// single byte is written and flushed; writeHook captures that slice, which is
// then re-extended to its full capacity. The writer is reset back to
// originalWriter before returning.
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
	var hook writeHook
	bw.Reset(&hook)
	bw.WriteByte(0)
	bw.Flush()
	bw.Reset(originalWriter)
	return hook.p[:cap(hook.p)]
}
| @@ -0,0 +1,19 @@ | |||
| // +build go1.8 | |||
| package websocket | |||
| import ( | |||
| "crypto/tls" | |||
| "net/http/httptrace" | |||
| ) | |||
| func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { | |||
| if trace.TLSHandshakeStart != nil { | |||
| trace.TLSHandshakeStart() | |||
| } | |||
| err := doHandshake(tlsConn, cfg) | |||
| if trace.TLSHandshakeDone != nil { | |||
| trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) | |||
| } | |||
| return err | |||
| } | |||
| @@ -0,0 +1,12 @@ | |||
| // +build !go1.8 | |||
| package websocket | |||
| import ( | |||
| "crypto/tls" | |||
| "net/http/httptrace" | |||
| ) | |||
// doHandshakeWithTrace performs the TLS handshake. httptrace hooks require
// Go 1.8, so this pre-1.8 fallback ignores the trace argument entirely.
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	return doHandshake(tlsConn, cfg)
}
| @@ -0,0 +1,237 @@ | |||
| // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package websocket | |||
| import ( | |||
| "crypto/rand" | |||
| "crypto/sha1" | |||
| "encoding/base64" | |||
| "io" | |||
| "net/http" | |||
| "strings" | |||
| "unicode/utf8" | |||
| ) | |||
// keyGUID is the fixed GUID that RFC 6455 section 4.2.2 specifies for
// computing the Sec-WebSocket-Accept value.
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")

// computeAcceptKey returns the Sec-WebSocket-Accept value for the given
// Sec-WebSocket-Key: base64(SHA-1(key || GUID)).
func computeAcceptKey(challengeKey string) string {
	h := sha1.New()
	io.WriteString(h, challengeKey)
	h.Write(keyGUID)
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
// generateChallengeKey returns a base64-encoded 16-byte random value suitable
// for the Sec-WebSocket-Key request header.
func generateChallengeKey() (string, error) {
	key := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(key), nil
}
// Octet types from RFC 2616. octetTypes classifies every byte value as a
// token octet, a space octet, both, or neither; it is filled in by init and
// read by skipSpace/nextToken.
var octetTypes [256]byte

const (
	isTokenOctet = 1 << iota
	isSpaceOctet
)

// init builds the octetTypes classification table from the RFC 2616 grammar.
func init() {
	// From RFC 2616
	//
	// OCTET      = <any 8-bit sequence of data>
	// CHAR       = <any US-ASCII character (octets 0 - 127)>
	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
	// CR         = <US-ASCII CR, carriage return (13)>
	// LF         = <US-ASCII LF, linefeed (10)>
	// SP         = <US-ASCII SP, space (32)>
	// HT         = <US-ASCII HT, horizontal-tab (9)>
	// <">        = <US-ASCII double-quote mark (34)>
	// CRLF       = CR LF
	// LWS        = [CRLF] 1*( SP | HT )
	// TEXT       = <any OCTET except CTLs, but including LWS>
	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
	// token      = 1*<any CHAR except CTLs or separators>
	// qdtext     = <any TEXT except <">>
	for c := 0; c < 256; c++ {
		var t byte
		isCtl := c <= 31 || c == 127
		isChar := 0 <= c && c <= 127
		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
			t |= isSpaceOctet
		}
		if isChar && !isCtl && !isSeparator {
			t |= isTokenOctet
		}
		octetTypes[c] = t
	}
}
| func skipSpace(s string) (rest string) { | |||
| i := 0 | |||
| for ; i < len(s); i++ { | |||
| if octetTypes[s[i]]&isSpaceOctet == 0 { | |||
| break | |||
| } | |||
| } | |||
| return s[i:] | |||
| } | |||
| func nextToken(s string) (token, rest string) { | |||
| i := 0 | |||
| for ; i < len(s); i++ { | |||
| if octetTypes[s[i]]&isTokenOctet == 0 { | |||
| break | |||
| } | |||
| } | |||
| return s[:i], s[i:] | |||
| } | |||
// nextTokenOrQuoted parses either a plain token or a double-quoted string
// (with backslash escapes) from the start of s, returning the unescaped value
// and the remainder. A malformed quoted string yields ("", "").
func nextTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return nextToken(s)
	}
	s = s[1:]
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			// Unescaped quoted string; no copying needed.
			return s[:i], s[i+1:]
		case '\\':
			// First escape found: switch to a copying scan that strips the
			// backslashes into p.
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i = i + 1; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			// Unterminated quoted string.
			return "", ""
		}
	}
	// No closing quote found.
	return "", ""
}
// equalASCIIFold reports whether s and t are equal when ASCII letters are
// compared case-insensitively; non-ASCII runes must match exactly.
func equalASCIIFold(s, t string) bool {
	lower := func(r rune) rune {
		if 'A' <= r && r <= 'Z' {
			return r + ('a' - 'A')
		}
		return r
	}
	for s != "" && t != "" {
		sr, n := utf8.DecodeRuneInString(s)
		s = s[n:]
		tr, m := utf8.DecodeRuneInString(t)
		t = t[m:]
		if lower(sr) != lower(tr) {
			return false
		}
	}
	// Equal only if both strings were fully consumed.
	return s == t
}
// tokenListContainsValue returns true if the 1#token header with the given
// name contains a token equal to value with ASCII case folding. Malformed
// list items cause the rest of that header value to be skipped.
func tokenListContainsValue(header http.Header, name string, value string) bool {
headers:
	for _, s := range header[name] {
		for {
			var t string
			t, s = nextToken(skipSpace(s))
			if t == "" {
				// Empty token: give up on this header value.
				continue headers
			}
			s = skipSpace(s)
			if s != "" && s[0] != ',' {
				// Token not followed by a list separator: malformed.
				continue headers
			}
			if equalASCIIFold(t, value) {
				return true
			}
			if s == "" {
				continue headers
			}
			// Skip the comma and parse the next list item.
			s = s[1:]
		}
	}
	return false
}
// parseExtensions parses WebSocket extensions from a header. Each extension
// is returned as a map whose "" key holds the extension token and whose other
// entries are its parameters. Malformed items cause the remainder of that
// header value to be skipped.
func parseExtensions(header http.Header) []map[string]string {
	// From RFC 6455:
	//
	//  Sec-WebSocket-Extensions = extension-list
	//  extension-list = 1#extension
	//  extension = extension-token *( ";" extension-param )
	//  extension-token = registered-token
	//  registered-token = token
	//  extension-param = token [ "=" (token | quoted-string) ]
	//     ;When using the quoted-string syntax variant, the value
	//     ;after quoted-string unescaping MUST conform to the
	//     ;'token' ABNF.
	var result []map[string]string
headers:
	for _, s := range header["Sec-Websocket-Extensions"] {
		for {
			var t string
			t, s = nextToken(skipSpace(s))
			if t == "" {
				continue headers
			}
			// "" maps to the extension token itself.
			ext := map[string]string{"": t}
			for {
				s = skipSpace(s)
				if !strings.HasPrefix(s, ";") {
					break
				}
				var k string
				k, s = nextToken(skipSpace(s[1:]))
				if k == "" {
					continue headers
				}
				s = skipSpace(s)
				var v string
				if strings.HasPrefix(s, "=") {
					// Parameter with a value: token or quoted-string.
					v, s = nextTokenOrQuoted(skipSpace(s[1:]))
					s = skipSpace(s)
				}
				if s != "" && s[0] != ',' && s[0] != ';' {
					continue headers
				}
				ext[k] = v
			}
			if s != "" && s[0] != ',' {
				continue headers
			}
			result = append(result, ext)
			if s == "" {
				continue headers
			}
			// Skip the comma and parse the next extension.
			s = s[1:]
		}
	}
	return result
}
| @@ -0,0 +1,473 @@ | |||
| // Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. | |||
| //go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy | |||
| // Package proxy provides support for a variety of protocols to proxy network | |||
| // data. | |||
| // | |||
| package websocket | |||
| import ( | |||
| "errors" | |||
| "io" | |||
| "net" | |||
| "net/url" | |||
| "os" | |||
| "strconv" | |||
| "strings" | |||
| "sync" | |||
| ) | |||
// proxy_direct is a Dialer that opens connections without any proxying.
type proxy_direct struct{}

// Direct is a direct proxy: one that makes network connections directly.
var proxy_Direct proxy_direct

// Dial connects straight to addr on the given network using net.Dial.
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
	return net.Dial(network, addr)
}
| // A PerHost directs connections to a default Dialer unless the host name | |||
| // requested matches one of a number of exceptions. | |||
| type proxy_PerHost struct { | |||
| def, bypass proxy_Dialer | |||
| bypassNetworks []*net.IPNet | |||
| bypassIPs []net.IP | |||
| bypassZones []string | |||
| bypassHosts []string | |||
| } | |||
| // NewPerHost returns a PerHost Dialer that directs connections to either | |||
| // defaultDialer or bypass, depending on whether the connection matches one of | |||
| // the configured rules. | |||
| func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { | |||
| return &proxy_PerHost{ | |||
| def: defaultDialer, | |||
| bypass: bypass, | |||
| } | |||
| } | |||
| // Dial connects to the address addr on the given network through either | |||
| // defaultDialer or bypass. | |||
| func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { | |||
| host, _, err := net.SplitHostPort(addr) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return p.dialerForRequest(host).Dial(network, addr) | |||
| } | |||
| func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { | |||
| if ip := net.ParseIP(host); ip != nil { | |||
| for _, net := range p.bypassNetworks { | |||
| if net.Contains(ip) { | |||
| return p.bypass | |||
| } | |||
| } | |||
| for _, bypassIP := range p.bypassIPs { | |||
| if bypassIP.Equal(ip) { | |||
| return p.bypass | |||
| } | |||
| } | |||
| return p.def | |||
| } | |||
| for _, zone := range p.bypassZones { | |||
| if strings.HasSuffix(host, zone) { | |||
| return p.bypass | |||
| } | |||
| if host == zone[1:] { | |||
| // For a zone ".example.com", we match "example.com" | |||
| // too. | |||
| return p.bypass | |||
| } | |||
| } | |||
| for _, bypassHost := range p.bypassHosts { | |||
| if bypassHost == host { | |||
| return p.bypass | |||
| } | |||
| } | |||
| return p.def | |||
| } | |||
| // AddFromString parses a string that contains comma-separated values | |||
| // specifying hosts that should use the bypass proxy. Each value is either an | |||
| // IP address, a CIDR range, a zone (*.example.com) or a host name | |||
| // (localhost). A best effort is made to parse the string and errors are | |||
| // ignored. | |||
| func (p *proxy_PerHost) AddFromString(s string) { | |||
| hosts := strings.Split(s, ",") | |||
| for _, host := range hosts { | |||
| host = strings.TrimSpace(host) | |||
| if len(host) == 0 { | |||
| continue | |||
| } | |||
| if strings.Contains(host, "/") { | |||
| // We assume that it's a CIDR address like 127.0.0.0/8 | |||
| if _, net, err := net.ParseCIDR(host); err == nil { | |||
| p.AddNetwork(net) | |||
| } | |||
| continue | |||
| } | |||
| if ip := net.ParseIP(host); ip != nil { | |||
| p.AddIP(ip) | |||
| continue | |||
| } | |||
| if strings.HasPrefix(host, "*.") { | |||
| p.AddZone(host[1:]) | |||
| continue | |||
| } | |||
| p.AddHost(host) | |||
| } | |||
| } | |||
| // AddIP specifies an IP address that will use the bypass proxy. Note that | |||
| // this will only take effect if a literal IP address is dialed. A connection | |||
| // to a named host will never match an IP. | |||
| func (p *proxy_PerHost) AddIP(ip net.IP) { | |||
| p.bypassIPs = append(p.bypassIPs, ip) | |||
| } | |||
| // AddNetwork specifies an IP range that will use the bypass proxy. Note that | |||
| // this will only take effect if a literal IP address is dialed. A connection | |||
| // to a named host will never match. | |||
| func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { | |||
| p.bypassNetworks = append(p.bypassNetworks, net) | |||
| } | |||
| // AddZone specifies a DNS suffix that will use the bypass proxy. A zone of | |||
| // "example.com" matches "example.com" and all of its subdomains. | |||
| func (p *proxy_PerHost) AddZone(zone string) { | |||
| if strings.HasSuffix(zone, ".") { | |||
| zone = zone[:len(zone)-1] | |||
| } | |||
| if !strings.HasPrefix(zone, ".") { | |||
| zone = "." + zone | |||
| } | |||
| p.bypassZones = append(p.bypassZones, zone) | |||
| } | |||
| // AddHost specifies a host name that will use the bypass proxy. | |||
| func (p *proxy_PerHost) AddHost(host string) { | |||
| if strings.HasSuffix(host, ".") { | |||
| host = host[:len(host)-1] | |||
| } | |||
| p.bypassHosts = append(p.bypassHosts, host) | |||
| } | |||
// A Dialer is a means to establish a connection.
type proxy_Dialer interface {
	// Dial connects to the given address via the proxy.
	Dial(network, addr string) (c net.Conn, err error)
}

// Auth contains authentication parameters that specific Dialers may require.
// For SOCKS5, these are the RFC 1929 username/password credentials.
type proxy_Auth struct {
	User, Password string
}
| // FromEnvironment returns the dialer specified by the proxy related variables in | |||
| // the environment. | |||
| func proxy_FromEnvironment() proxy_Dialer { | |||
| allProxy := proxy_allProxyEnv.Get() | |||
| if len(allProxy) == 0 { | |||
| return proxy_Direct | |||
| } | |||
| proxyURL, err := url.Parse(allProxy) | |||
| if err != nil { | |||
| return proxy_Direct | |||
| } | |||
| proxy, err := proxy_FromURL(proxyURL, proxy_Direct) | |||
| if err != nil { | |||
| return proxy_Direct | |||
| } | |||
| noProxy := proxy_noProxyEnv.Get() | |||
| if len(noProxy) == 0 { | |||
| return proxy | |||
| } | |||
| perHost := proxy_NewPerHost(proxy, proxy_Direct) | |||
| perHost.AddFromString(noProxy) | |||
| return perHost | |||
| } | |||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
// from a URL with such a scheme. It is nil until the first registration.
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)

// RegisterDialerType takes a URL scheme and a function to generate Dialers from
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
// by FromURL.
//
// NOTE(review): access to the map is not synchronized; registration appears
// intended to happen during package init, before any lookups — confirm.
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
	// Lazily allocate the registry on first registration.
	if proxy_proxySchemes == nil {
		proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
	}
	proxy_proxySchemes[scheme] = f
}
| // FromURL returns a Dialer given a URL specification and an underlying | |||
| // Dialer for it to make network requests. | |||
| func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { | |||
| var auth *proxy_Auth | |||
| if u.User != nil { | |||
| auth = new(proxy_Auth) | |||
| auth.User = u.User.Username() | |||
| if p, ok := u.User.Password(); ok { | |||
| auth.Password = p | |||
| } | |||
| } | |||
| switch u.Scheme { | |||
| case "socks5": | |||
| return proxy_SOCKS5("tcp", u.Host, auth, forward) | |||
| } | |||
| // If the scheme doesn't match any of the built-in schemes, see if it | |||
| // was registered by another package. | |||
| if proxy_proxySchemes != nil { | |||
| if f, ok := proxy_proxySchemes[u.Scheme]; ok { | |||
| return f(u, forward) | |||
| } | |||
| } | |||
| return nil, errors.New("proxy: unknown scheme: " + u.Scheme) | |||
| } | |||
var (
	// proxy_allProxyEnv caches the ALL_PROXY / all_proxy lookup.
	proxy_allProxyEnv = &proxy_envOnce{
		names: []string{"ALL_PROXY", "all_proxy"},
	}
	// proxy_noProxyEnv caches the NO_PROXY / no_proxy lookup.
	proxy_noProxyEnv = &proxy_envOnce{
		names: []string{"NO_PROXY", "no_proxy"},
	}
)

// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
// (Borrowed from net/http/transport.go)
type proxy_envOnce struct {
	names []string  // candidate variable names, checked in order
	once  sync.Once // guards the single lookup
	val   string    // first non-empty value found, else ""
}

// Get returns the cached value, performing the lookup on the first call.
func (e *proxy_envOnce) Get() string {
	e.once.Do(e.init)
	return e.val
}

// init records the first non-empty value among e.names; e.val stays ""
// when none of them is set.
func (e *proxy_envOnce) init() {
	for _, name := range e.names {
		if v := os.Getenv(name); v != "" {
			e.val = v
			return
		}
	}
}
| // SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address | |||
| // with an optional username and password. See RFC 1928 and RFC 1929. | |||
| func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { | |||
| s := &proxy_socks5{ | |||
| network: network, | |||
| addr: addr, | |||
| forward: forward, | |||
| } | |||
| if auth != nil { | |||
| s.user = auth.User | |||
| s.password = auth.Password | |||
| } | |||
| return s, nil | |||
| } | |||
// proxy_socks5 is a Dialer that speaks the SOCKSv5 protocol (RFC 1928) to a
// proxy server, optionally authenticating with username/password (RFC 1929).
type proxy_socks5 struct {
	user, password string       // credentials offered when the server requests them
	network, addr  string       // how to reach the proxy server itself
	forward        proxy_Dialer // dialer used for the hop to the proxy
}

// SOCKS protocol version byte.
const proxy_socks5Version = 5

// Authentication method codes (RFC 1928 section 3).
const (
	proxy_socks5AuthNone     = 0
	proxy_socks5AuthPassword = 2
)

// CONNECT command code (RFC 1928 section 4).
const proxy_socks5Connect = 1

// Address type (ATYP) codes (RFC 1928 section 5).
const (
	proxy_socks5IP4    = 1
	proxy_socks5Domain = 3
	proxy_socks5IP6    = 4
)

// proxy_socks5Errors maps SOCKS5 reply codes (the REP field) to messages;
// index 0 (success) is the empty string.
var proxy_socks5Errors = []string{
	"",
	"general failure",
	"connection forbidden",
	"network unreachable",
	"host unreachable",
	"connection refused",
	"TTL expired",
	"command not supported",
	"address type not supported",
}

// Dial connects to the address addr on the given network via the SOCKS5 proxy.
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
	// SOCKS5 CONNECT only covers TCP here.
	switch network {
	case "tcp", "tcp6", "tcp4":
	default:
		return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
	}

	conn, err := s.forward.Dial(s.network, s.addr)
	if err != nil {
		return nil, err
	}
	// Close the half-set-up connection if the handshake fails.
	if err := s.connect(conn, addr); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}

// connect takes an existing connection to a socks5 proxy server,
// and commands the server to extend that connection to target,
// which must be a canonical address with a host and port.
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
	host, portStr, err := net.SplitHostPort(target)
	if err != nil {
		return err
	}

	port, err := strconv.Atoi(portStr)
	if err != nil {
		return errors.New("proxy: failed to parse port number: " + portStr)
	}
	if port < 1 || port > 0xffff {
		return errors.New("proxy: port number out of range: " + portStr)
	}

	// the size here is just an estimate
	buf := make([]byte, 0, 6+len(host))

	// Greeting: version byte plus the auth methods we can offer. The
	// username/password method is offered only when a user is configured
	// and both credentials fit in the protocol's single length byte.
	buf = append(buf, proxy_socks5Version)
	if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
		buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
	} else {
		buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
	}

	if _, err := conn.Write(buf); err != nil {
		return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	// Greeting reply: version byte, then the method the server selected.
	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
		return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}
	if buf[0] != 5 {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
	}
	if buf[1] == 0xff {
		// 0xff means "no acceptable methods".
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
	}

	// See RFC 1929
	if buf[1] == proxy_socks5AuthPassword {
		// Username/password sub-negotiation: ver, ulen, user, plen, pass.
		buf = buf[:0]
		buf = append(buf, 1 /* password protocol version */)
		buf = append(buf, uint8(len(s.user)))
		buf = append(buf, s.user...)
		buf = append(buf, uint8(len(s.password)))
		buf = append(buf, s.password...)

		if _, err := conn.Write(buf); err != nil {
			return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}

		if _, err := io.ReadFull(conn, buf[:2]); err != nil {
			return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}

		// Non-zero status means the server rejected the credentials.
		if buf[1] != 0 {
			return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
		}
	}

	// CONNECT request: version, command, reserved byte, then the target
	// address in one of the three ATYP encodings, then the port.
	buf = buf[:0]
	buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)

	if ip := net.ParseIP(host); ip != nil {
		if ip4 := ip.To4(); ip4 != nil {
			buf = append(buf, proxy_socks5IP4)
			ip = ip4
		} else {
			buf = append(buf, proxy_socks5IP6)
		}
		buf = append(buf, ip...)
	} else {
		// Domain-name encoding carries the length in a single byte.
		if len(host) > 255 {
			return errors.New("proxy: destination host name too long: " + host)
		}
		buf = append(buf, proxy_socks5Domain)
		buf = append(buf, byte(len(host)))
		buf = append(buf, host...)
	}
	// Port is sent in network (big-endian) byte order.
	buf = append(buf, byte(port>>8), byte(port))

	if _, err := conn.Write(buf); err != nil {
		return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	// Reply header: version, reply code, reserved, address type.
	if _, err := io.ReadFull(conn, buf[:4]); err != nil {
		return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	failure := "unknown error"
	if int(buf[1]) < len(proxy_socks5Errors) {
		failure = proxy_socks5Errors[buf[1]]
	}

	// A non-empty message means a non-zero reply code (index 0 is "").
	if len(failure) > 0 {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
	}

	// Drain the bound address from the reply; callers don't use it.
	bytesToDiscard := 0
	switch buf[3] {
	case proxy_socks5IP4:
		bytesToDiscard = net.IPv4len
	case proxy_socks5IP6:
		bytesToDiscard = net.IPv6len
	case proxy_socks5Domain:
		// Domain replies prefix the name with its one-byte length.
		_, err := io.ReadFull(conn, buf[:1])
		if err != nil {
			return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}
		bytesToDiscard = int(buf[0])
	default:
		return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
	}

	if cap(buf) < bytesToDiscard {
		buf = make([]byte, bytesToDiscard)
	} else {
		buf = buf[:bytesToDiscard]
	}
	if _, err := io.ReadFull(conn, buf); err != nil {
		return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	// Also need to discard the port number
	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
		return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	return nil
}
| @@ -472,6 +472,9 @@ github.com/gorilla/mux | |||
| github.com/gorilla/securecookie | |||
| # github.com/gorilla/sessions v1.2.0 | |||
| github.com/gorilla/sessions | |||
| # github.com/gorilla/websocket v1.4.0 | |||
| ## explicit | |||
| github.com/gorilla/websocket | |||
| # github.com/hashicorp/go-cleanhttp v0.5.1 | |||
| github.com/hashicorp/go-cleanhttp | |||
| # github.com/hashicorp/go-retryablehttp v0.6.6 | |||
| @@ -45,6 +45,8 @@ export default { | |||
| data() { | |||
| return { | |||
| url:'', | |||
| url_infor:'', | |||
| href_:'', | |||
| contributors_list:[], | |||
| contributors_list_page:[], | |||
| currentPage:1, | |||
| @@ -56,7 +58,7 @@ export default { | |||
| methods: { | |||
| getContributorsList(){ | |||
| this.$axios.get(this.url+'/list').then((res)=>{ | |||
| this.$axios.get(this.url+'/list?'+this.url_infor).then((res)=>{ | |||
| this.contributors_list = res.data.contributor_info | |||
| this.totalNum = this.contributors_list.length | |||
| this.contributors_list_page = this.contributors_list.slice(0,this.pageSize) | |||
| @@ -78,6 +80,10 @@ created(){ | |||
| this.url = url; | |||
| let strIndex = this.url.indexOf("contributors") | |||
| this.url_code = this.url.substr(0,strIndex) | |||
| this.href_ = window.location.href; | |||
| let index = this.href_.indexOf("?") | |||
| this.url_infor = this.href_.substring(index+1,this.href_.length) | |||
| this.getContributorsList() | |||
| }, | |||
| @@ -52,7 +52,7 @@ export default { | |||
| previewTemplate += | |||
| ' <span data-dz-name data-dz-thumbnail></span>'; | |||
| previewTemplate += ' </div>\n '; | |||
| previewTemplate += ' <div class="dz-size" data-dz-size></div>\n '; | |||
| previewTemplate += ' <div class="dz-size" data-dz-size style="white-space: nowrap"></div>\n '; | |||
| previewTemplate += ' </div>\n '; | |||
| previewTemplate += ' <div class="dz-progress ui active progress">'; | |||
| previewTemplate += | |||
| @@ -263,6 +263,7 @@ export default { | |||
| let cName = $("input[name='Name']").val() | |||
| let version = $("input[name='Version']").val() | |||
| let data = $("#formId").serialize() | |||
| let url_href = version === '0.0.1' ? context.url_create_newModel : context.url_create_newVersion | |||
| $("#mask").css({"display":"block","z-index":"9999"}) | |||
| $.ajax({ | |||
| url:url_href, | |||
| @@ -292,7 +293,7 @@ export default { | |||
| if(!this.loadNodeMap.get(row.cName)){ | |||
| const parent = store.states.data | |||
| const index = parent.findIndex(child => child.ID == row.ID) | |||
| parent.splice(index, 1) | |||
| this.getModelList() | |||
| }else{ | |||
| let {tree,treeNode,resolve} = this.loadNodeMap.get(row.cName) | |||
| const keys = Object.keys(store.states.lazyTreeNodeMap); | |||
| @@ -406,6 +407,8 @@ export default { | |||
| this.getModelList() | |||
| this.url = location.href.split('show_model')[0] | |||
| this.submitId.addEventListener("click", this.submit) | |||
| this.url_create_newVersion = this.url + 'create_model' | |||
| this.url_create_newModel = this.url + 'create_new_model' | |||
| }, | |||
| beforeDestroy() { // 实例销毁之前对点击事件进行解绑 | |||
| @@ -517,4 +520,4 @@ export default { | |||
| opacity: .45 !important; | |||
| } | |||
| </style> | |||
| </style> | |||
| @@ -53,7 +53,7 @@ export default { | |||
| previewTemplate += | |||
| ' <span data-dz-name data-dz-thumbnail></span>'; | |||
| previewTemplate += ' </div>\n '; | |||
| previewTemplate += ' <div class="dz-size" data-dz-size></div>\n '; | |||
| previewTemplate += ' <div class="dz-size" data-dz-size style="white-space: nowrap"></div>\n '; | |||
| previewTemplate += ' </div>\n '; | |||
| previewTemplate += ' <div class="dz-progress ui active progress">'; | |||
| previewTemplate += | |||
| @@ -1,6 +1,6 @@ | |||
| @import "~highlight.js/styles/github.css"; | |||
| @import "./vendor/gitGraph.css"; | |||
| // @import "~/remixicon/fonts/remixicon.css"; | |||
| @import "_svg"; | |||
| @import "_tribute"; | |||
| @import "_base"; | |||