diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 161e06f3f..17fa2a69f 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -8,13 +8,14 @@ import ( "strings" "time" + "code.gitea.io/gitea/modules/util" + "xorm.io/builder" "xorm.io/xorm" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" - "code.gitea.io/gitea/modules/util" ) type CloudbrainStatus string @@ -142,7 +143,7 @@ type Cloudbrain struct { VersionID int64 //版本id VersionName string `xorm:"INDEX"` //当前版本 Uuid string //数据集id - DatasetName string + DatasetName string `xorm:"varchar(2000)"` VersionCount int //任务的当前版本数量,不包括删除的 IsLatestVersion string //是否是最新版本,1是,0否 CommitID string //提交的仓库代码id diff --git a/models/dataset.go b/models/dataset.go index a0f6ba2aa..e91adb7d2 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -6,6 +6,8 @@ import ( "sort" "strings" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/timeutil" @@ -178,7 +180,7 @@ type SearchDatasetOptions struct { Category string Task string License string - DatasetIDs []int64 // 目前只在StarByMe为true时起作用 + DatasetIDs []int64 ListOptions SearchOrderBy IsOwner bool @@ -188,6 +190,7 @@ type SearchDatasetOptions struct { JustNeedZipFile bool NeedAttachment bool UploadAttachmentByMe bool + QueryReference bool } func CreateDataset(dataset *Dataset) (err error) { @@ -258,7 +261,7 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { } } if len(opts.DatasetIDs) > 0 { - if opts.StarByMe { + if opts.StarByMe || (opts.RepoID == 0 && opts.QueryReference) { cond = cond.And(builder.In("dataset.id", opts.DatasetIDs)) } else { subCon := builder.NewCond() @@ -329,13 +332,15 @@ func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (Da return nil, 0, fmt.Errorf("Count: %v", err) } - sess.Select(selectColumnsSql).Join("INNER", "repository", "repository.id = dataset.repo_id"). + builderQuery := builder.Dialect(setting.Database.Type).Select("id", "title", "status", "category", "description", "download_times", "license", "task", "release_id", "user_id", "repo_id", "created_unix", "updated_unix", "num_stars", "recommend", "use_count").From(builder.Dialect(setting.Database.Type).Select(selectColumnsSql).From("dataset").Join("INNER", "repository", "repository.id = dataset.repo_id"). Join("INNER", "attachment", "attachment.dataset_id=dataset.id"). - Where(cond).OrderBy(opts.SearchOrderBy.String()) + Where(cond), "d").OrderBy(opts.SearchOrderBy.String()) + if opts.PageSize > 0 { - sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize) + builderQuery.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize) } - if err = sess.Find(&datasets); err != nil { + + if err = sess.SQL(builderQuery).Find(&datasets); err != nil { return nil, 0, fmt.Errorf("Dataset: %v", err) } diff --git a/models/dataset_reference.go b/models/dataset_reference.go new file mode 100644 index 000000000..a43cd625e --- /dev/null +++ b/models/dataset_reference.go @@ -0,0 +1,88 @@ +package models + +import ( + "strconv" + "strings" + + "code.gitea.io/gitea/modules/timeutil" +) + +type DatasetReference struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX unique"` + DatasetID string `xorm:"TEXT"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` +} + +func GetDatasetIdsByRepoID(repoID int64) []int64 { + var datasets []int64 + var datasetIds []string + _ = x.Table("dataset_reference").Where("repo_id=?", repoID). 
+ Cols("dataset_reference.dataset_id").Find(&datasetIds) + if len(datasetIds) > 0 { + for _, datasetIdStr := range strings.Split(datasetIds[0], ",") { + datasetId, err := strconv.ParseInt(datasetIdStr, 10, 64) + if err != nil { + continue + } + datasets = append(datasets, datasetId) + } + } + + return datasets +} + +func HasReferenceDataset(repoID int64) bool { + + var datasetIds []string + _ = x.Table("dataset_reference").Where("repo_id=?", repoID). + Cols("dataset_reference.dataset_id").Find(&datasetIds) + return len(datasetIds) > 0 +} + +func getReferenceDatasetStr(repoID int64) string { + + var datasetIds []string + _ = x.Table("dataset_reference").Where("repo_id=?", repoID). + Cols("dataset_reference.dataset_id").Find(&datasetIds) + if len(datasetIds) > 0 { + return datasetIds[0] + } + return "" +} + +func DeleteReferenceDatasetIdsByRepoID(repoID int64) error { + + _, err := x.Exec("delete from dataset_reference where repo_id=?", repoID) + return err +} + +func NewDatasetIdsByRepoID(repoID int64, datasetIds []int64) error { + if len(datasetIds) == 0 { //关联数据集数组为空 + DeleteReferenceDatasetIdsByRepoID(repoID) + } + var datasetsStrArray []string + for _, datasetId := range datasetIds { + datasetsStrArray = append(datasetsStrArray, strconv.FormatInt(datasetId, 10)) + } + + newDatasetStr := strings.Join(datasetsStrArray, ",") + oldDatasetStr := getReferenceDatasetStr(repoID) + if newDatasetStr == oldDatasetStr { //关联数据集无变化,不需要处理 + return nil + } + if oldDatasetStr != "" { //已经存在关联数据集 + _, err := x.Exec("update dataset_reference set dataset_id=? where repo_id=?", newDatasetStr, repoID) + + return err + } else { + datasetReference := DatasetReference{ + DatasetID: newDatasetStr, + RepoID: repoID, + } + + _, err := x.Insert(datasetReference) + return err + } + +} diff --git a/models/models.go b/models/models.go index 8898955a7..5018f5f99 100755 --- a/models/models.go +++ b/models/models.go @@ -146,6 +146,7 @@ func init() { new(SearchRecord), new(AiModelConvert), new(CloudbrainTemp), + new(DatasetReference), ) tablesStatistic = append(tablesStatistic, diff --git a/modules/auth/dataset.go b/modules/auth/dataset.go index 71b5ac938..8aed3a8c2 100755 --- a/modules/auth/dataset.go +++ b/modules/auth/dataset.go @@ -44,3 +44,11 @@ type EditAttachmentForm struct { func (f *EditAttachmentForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { return validate(errs, ctx.Data, f, ctx.Locale) } + +type ReferenceDatasetForm struct { + DatasetID []int64 `binding:"Required"` +} + +func (f *ReferenceDatasetForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { + return validate(errs, ctx.Data, f, ctx.Locale) +} diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go index e55d8c887..30f080335 100755 --- a/modules/cloudbrain/cloudbrain.go +++ b/modules/cloudbrain/cloudbrain.go @@ -82,7 +82,7 @@ type GenerateCloudBrainTaskReq struct { } func GetCloudbrainDebugCommand() string { - var command = `pip3 install jupyterlab==3 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --ServerApp.shutdown_no_activity_timeout=` + setting.CullIdleTimeout + ` --TerminalManager.cull_inactive_timeout=` + setting.CullIdleTimeout + ` --TerminalManager.cull_interval=` + setting.CullInterval + ` --MappingKernelManager.cull_idle_timeout=` + setting.CullIdleTimeout + ` --MappingKernelManager.cull_interval=` + setting.CullInterval + ` --MappingKernelManager.cull_connected=True --MappingKernelManager.cull_busy=True --no-browser --ip=0.0.0.0 --allow-root 
--notebook-dir="/code" --port=80 --ServerApp.token="" --ServerApp.allow_origin="self https://cloudbrain.pcl.ac.cn" ` + var command = `pip3 install jupyterlab==3 -i https://pypi.tuna.tsinghua.edu.cn/simple;pip3 install -U "nbclassic>=0.2.8" -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --ServerApp.shutdown_no_activity_timeout=` + setting.CullIdleTimeout + ` --TerminalManager.cull_inactive_timeout=` + setting.CullIdleTimeout + ` --TerminalManager.cull_interval=` + setting.CullInterval + ` --MappingKernelManager.cull_idle_timeout=` + setting.CullIdleTimeout + ` --MappingKernelManager.cull_interval=` + setting.CullInterval + ` --MappingKernelManager.cull_connected=True --MappingKernelManager.cull_busy=True --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --ServerApp.token="" --LabApp.token="" --ServerApp.allow_origin="self https://cloudbrain.pcl.ac.cn" ` return command } diff --git a/modules/cloudbrain/resty.go b/modules/cloudbrain/resty.go index d9db3bbb5..8387d481a 100755 --- a/modules/cloudbrain/resty.go +++ b/modules/cloudbrain/resty.go @@ -81,7 +81,8 @@ func GetQueuesDetail() (*map[string]int, error) { var jobResult models.QueueDetailResult var result = make(map[string]int, 0) - + retry := 0 +sendjob: res, err := client.R(). SetHeader("Content-Type", "application/json"). SetAuthToken(TOKEN). @@ -92,6 +93,12 @@ func GetQueuesDetail() (*map[string]int, error) { return nil, fmt.Errorf("resty get queues detail failed: %s", err) } + if jobResult.Code == errInvalidToken && retry < 1 { + retry++ + _ = loginCloudbrain() + goto sendjob + } + if jobResult.Code != Success { return nil, fmt.Errorf("jobResult err: %s", res.String()) } diff --git a/modules/context/permission_json.go b/modules/context/permission_json.go new file mode 100644 index 000000000..28bc3f6a9 --- /dev/null +++ b/modules/context/permission_json.go @@ -0,0 +1,43 @@ +package context + +import ( + "net/http" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/log" + "gitea.com/macaron/macaron" +) + +func RequireRepoReaderJson(unitType models.UnitType) macaron.Handler { + return func(ctx *Context) { + if !ctx.Repo.CanRead(unitType) { + if log.IsTrace() { + if ctx.IsSigned { + log.Trace("Permission Denied: User %-v cannot read %-v in Repo %-v\n"+ + "User in Repo has Permissions: %-+v", + ctx.User, + unitType, + ctx.Repo.Repository, + ctx.Repo.Permission) + } else { + log.Trace("Permission Denied: Anonymous user cannot read %-v in Repo %-v\n"+ + "Anonymous user in Repo has Permissions: %-+v", + unitType, + ctx.Repo.Repository, + ctx.Repo.Permission) + } + } + ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("error.no_right"))) + return + } + } +} + +func RequireRepoWriterJson(unitType models.UnitType) macaron.Handler { + return func(ctx *Context) { + if !ctx.Repo.CanWrite(unitType) { + ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("error.no_right"))) + return + } + } +} diff --git a/modules/dataset/dataset.go b/modules/dataset/dataset.go index a180af184..297ed460d 100644 --- a/modules/dataset/dataset.go +++ b/modules/dataset/dataset.go @@ -1,5 +1,7 @@ package dataset +import "code.gitea.io/gitea/models" + func GetResourceType(cloudbrainType int) string { if cloudbrainType == 0 { return "CPU/GPU" @@ -15,3 +17,19 @@ func GetStatusText(isPrivate bool) string { return "dataset.public" } } + +func IsShowDataSetOfCurrentRepo(repoID int64) bool { + repo := models.Repository{ + ID: repoID, + } + + dataset, _ := models.GetDatasetByRepo(&repo) + if dataset != nil { + 
return true + } + if models.HasReferenceDataset(repoID) { + return false + } + return true + +} diff --git a/modules/setting/repository.go b/modules/setting/repository.go index ee4f8b379..1eb1875b2 100644 --- a/modules/setting/repository.go +++ b/modules/setting/repository.go @@ -193,8 +193,9 @@ var ( Wiki: []string{"never"}, }, } - RepoRootPath string - ScriptType = "bash" + RepoRootPath string + RepoMaxReferenceDatasetNum int + ScriptType = "bash" ) func newRepository() { @@ -210,6 +211,8 @@ func newRepository() { Repository.UseCompatSSHURI = sec.Key("USE_COMPAT_SSH_URI").MustBool() Repository.MaxCreationLimit = sec.Key("MAX_CREATION_LIMIT").MustInt(-1) RepoRootPath = sec.Key("ROOT").MustString(path.Join(homeDir, "gitea-repositories")) + RepoMaxReferenceDatasetNum = sec.Key("MAX_REF_DATASET_NUM").MustInt(20) + forcePathSeparator(RepoRootPath) if !filepath.IsAbs(RepoRootPath) { RepoRootPath = filepath.Join(AppWorkPath, RepoRootPath) diff --git a/modules/templates/helper.go b/modules/templates/helper.go index 857e365f8..8e7723e75 100755 --- a/modules/templates/helper.go +++ b/modules/templates/helper.go @@ -97,23 +97,24 @@ func NewFuncMap() []template.FuncMap { "AllowedReactions": func() []string { return setting.UI.Reactions }, - "AvatarLink": models.AvatarLink, - "Safe": Safe, - "SafeJS": SafeJS, - "Str2html": Str2html, - "subOne": subOne, - "TimeSince": timeutil.TimeSince, - "TimeSinceUnix": timeutil.TimeSinceUnix, - "TimeSinceUnix1": timeutil.TimeSinceUnix1, - "AttachmentResourceType": dataset.GetResourceType, - "AttachmentStatus": dataset.GetStatusText, - "TimeSinceUnixShort": timeutil.TimeSinceUnixShort, - "RawTimeSince": timeutil.RawTimeSince, - "FileSize": base.FileSize, - "PrettyNumber": base.PrettyNumber, - "Subtract": base.Subtract, - "EntryIcon": base.EntryIcon, - "MigrationIcon": MigrationIcon, + "AvatarLink": models.AvatarLink, + "Safe": Safe, + "SafeJS": SafeJS, + "Str2html": Str2html, + "subOne": subOne, + "TimeSince": timeutil.TimeSince, + "TimeSinceUnix": timeutil.TimeSinceUnix, + "TimeSinceUnix1": timeutil.TimeSinceUnix1, + "AttachmentResourceType": dataset.GetResourceType, + "AttachmentStatus": dataset.GetStatusText, + "IsShowDataSetOfCurrentRepo": dataset.IsShowDataSetOfCurrentRepo, + "TimeSinceUnixShort": timeutil.TimeSinceUnixShort, + "RawTimeSince": timeutil.RawTimeSince, + "FileSize": base.FileSize, + "PrettyNumber": base.PrettyNumber, + "Subtract": base.Subtract, + "EntryIcon": base.EntryIcon, + "MigrationIcon": MigrationIcon, "Add": func(a, b int) int { return a + b }, @@ -357,13 +358,15 @@ func NewTextFuncMap() []texttmpl.FuncMap { "AppDomain": func() string { return setting.Domain }, - "TimeSince": timeutil.TimeSince, - "TimeSinceUnix": timeutil.TimeSinceUnix, - "TimeSinceUnix1": timeutil.TimeSinceUnix1, - "TimeSinceUnixShort": timeutil.TimeSinceUnixShort, - "RawTimeSince": timeutil.RawTimeSince, - "AttachmentResourceType": dataset.GetResourceType, - "AttachmentStatus": dataset.GetStatusText, + "TimeSince": timeutil.TimeSince, + "TimeSinceUnix": timeutil.TimeSinceUnix, + "TimeSinceUnix1": timeutil.TimeSinceUnix1, + "TimeSinceUnixShort": timeutil.TimeSinceUnixShort, + "RawTimeSince": timeutil.RawTimeSince, + "AttachmentResourceType": dataset.GetResourceType, + "AttachmentStatus": dataset.GetStatusText, + "IsShowDataSetOfCurrentRepo": dataset.IsShowDataSetOfCurrentRepo, + "DateFmtLong": func(t time.Time) string { return t.Format(time.RFC1123Z) }, diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index 3c1237820..03ab37143 100755 --- 
a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -98,6 +98,7 @@ error500= Sorry, the site has encountered some problems, we are trying to
 initialized first ;
 debug_task_running_limit =Running time: no more than 4 hours, it will automatically stop if it exceeds 4 hours;
 dataset_desc = Dataset: Cloud Brain 1 provides CPU/GPU,Cloud Brain 2 provides Ascend NPU.And dataset also needs to be uploaded to the corresponding environment;
-platform_instructions = Instructions for use: You can refer to the Xiaobai training camp course of Qizhi AI collaboration platform.
+platform_instructions = Instructions for use: You can refer to the Xiaobai training camp course of the OpenI AI collaboration platform.
 model_not_exist = Model file: You do not have a model file yet, please generate and export the model through the training task first ;
 benchmark_leaderboards = Benchmark leaderboards
@@ -1220,7 +1232,7 @@ model.manage.version = Version
 model.manage.label = Label
 model.manage.size = Size
 model.manage.create_time = Create Time
-model.manage.Description = Description
+model.manage.description = Description
 model.manage.Accuracy = Accuracy
 model.manage.F1 = F1
 model.manage.Precision = Precision
@@ -1232,6 +1244,49 @@ model.convert=Model Transformation
 model.list=Model List
 model.manage.create_new_convert_task=Create Model Transformation Task
+model.manage.notcreatemodel=No model has been created.
+model.manage.init1=Code version: You have not initialized the code repository, please
+model.manage.init2=initialize it first;
+model.manage.createtrainjob_tip=Training task: You have not created a training task yet, please create one first
+model.manage.createtrainjob=Training task
+model.manage.delete=Delete Model
+model.manage.delete_confirm=Are you sure you want to delete this model? Once deleted, it cannot be restored.
+model.manage.select.trainjob=Select training task
+model.manage.select.version=Select version
+model.manage.engine=Model engine
+model.manage.select.engine=Select model engine
+model.manage.modelfile=Model file
+model.manage.modellabel=Model label
+model.manage.modeldesc=Model description
+model.manage.baseinfo=Basic Information
+modelconvert.notcreate=No model transformation task has been created.
+modelconvert.importfirst1=Please import first
+modelconvert.importfirst2=download model
+modelconvert.importfirst3=, then convert it.
+modelconvert.download=Download
+modelconvert.taskname=Task name
+modelconvert.modelname=Model name
+modelconvert.selectmodel=Select model
+modelconvert.modelversion=Model version
+modelconvert.selectversion=Select version
+modelconvert.selectmodelfile=Select model file
+modelconvert.taskstatus=Status
+modelconvert.srcengine=Source model engine
+modelconvert.outputformat=Output format
+modelconvert.createtime=Creation time
+modelconvert.inputdataformat=Input data format
+modelconvert.inputshape=Input tensor shape
+modelconvert.inputshapetip=For example: 1,1,32,32, corresponding to the input data format.
+modelconvert.netoutputdata=Network output data type
+modelconvert.taskdesc=Task description
+modelconvert.newtask=New
+modelconvert.createtask=Create model transformation task
+modelconvert.taskurlname=Model transformation task
+log_scroll_start=Scroll to top
+log_scroll_end=Scroll to bottom
+modelconvert.tasknameempty=Please enter a task name.
+modelconvert.inputshapeerror=Input format error, please enter a value such as 1,1,32,32, corresponding to the input data format.
+
 modelconvert.manage.create_error1=A model transformation task with the same name already exists.
 modelconvert.manage.create_error2=Only one running model transformation task can be created.
 modelconvert.manage.model_not_exist=The model does not exist.
@@ -2328,7 +2383,7 @@ topic.count_prompt = You can not select more than 25 topics
 topic.format_prompt = Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.
 imagetopic.format_prompt = Topics can be up to 35 characters long.
 use_repo_agreement=I promise that the content of this warehouse does not violate any national laws and regulations. During the use of the warehouse, I will abide by the OpenI community management regulations and platform usage rules, and will not conduct malicious attacks, mining, or any other illegal or disruptive platform order. Information release and related behaviors. For more information please refer to
-openi_use_agreement=OpenI Qizhi Community Platform Use Agreement.
+openi_use_agreement=OpenI Community Platform Use Agreement.
 [org]
 org_name_holder = Organization Name
 org_full_name_holder = Organization Full Name
@@ -3120,11 +3175,11 @@ specification = specification
 select_specification = select specification
 description = description
 wrong_specification=You cannot use this specification, please choose another item.
-
+resource_use=Resource Usage
 job_name_rule = Please enter letters, numbers, _ and - up to 64 characters and cannot end with a dash (-).
 train_dataset_path_rule = The dataset location is stored in the environment variable data_url, and the output path is stored in the environment variable train_url.
-infer_dataset_path_rule = The dataset location is stored in the environment variable data_url, and the output path is stored in the environment variable train_url.
+infer_dataset_path_rule = The dataset location is stored in the environment variable data_url, and the output path is stored in the environment variable result_url.
 view_sample = View sample
 inference_output_path_rule = The inference output path is stored in the environment variable result_url.
 model_file_path_rule=The model file location is stored in the environment variable ckpt_url
@@ -3158,3 +3213,4 @@ load_code_failed=Fail to load code, please check if the right branch is selected
 error.dataset_select = dataset select error:the count exceed the limit or has same name
 new_train_gpu_tooltips = The code is storaged in %s, the dataset is storaged in %s, and please put your model into %s then you can download it online
+new_infer_gpu_tooltips = The dataset is stored in %s and the model file is stored in %s; please store the inference output in %s for subsequent download.
\ No newline at end of file diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 06a1f3639..355c2aa33 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -99,6 +99,7 @@ error500=抱歉,站点遇到一些问题,我们正尝试修复网页 [error] occurred=发生错误 report_message=发生错误 +no_right=您没有权限执行本操作。 [install] install=安装页面 @@ -832,7 +833,12 @@ create_dataset=创建数据集 create_dataset_fail=创建数据集失败。 query_dataset_fail=查询数据集失败。 edit_attachment_fail=修改描述失败。 + +reference_dataset_fail=关联数据集失败,请稍后再试。 +cancel_reference_dataset_fail=取消关联数据集失败,请稍后再试。 + download_url=数据集下载地址 +download_copy=复制链接 download_oper=操作 show_dataset=数据集 edit_dataset=编辑数据集 @@ -926,6 +932,7 @@ dataset_explain = 数据集:云脑1提供 CPU / GPU 资源,云脑2提供 Asc dataset_instructions_for_use = 使用说明:可以参考启智AI协作平台 dataset_camp_course = 小白训练营课程 dataset_upload = 上传 +dataset_upload_status = 上传状态 dataset_file_name = 文件名称 dataset_available_clusters = 可用集群 dataset_upload_time = 上传时间 @@ -952,6 +959,13 @@ unzip_failed=解压失败 unzip_stared=解压中 unzip_status=解压状态 collection_num=收藏数量 +current_dataset=当前数据集 +linked_dataset=关联数据集 +unfavorite=取消收藏 +favorite=收藏 +disassociate=取消关联 +benchmark_dataset_tip=说明:先使用数据集功能上传模型,然后从数据集列表选模型。 + [repo] owner=拥有者 repo_name=项目名称 @@ -1243,6 +1257,50 @@ model.convert=模型转换任务 model.list=模型列表 model.manage.create_new_convert_task=创建模型转换任务 +model.manage.notcreatemodel=未创建过模型 +model.manage.init1=代码版本:您还没有初始化代码仓库,请先 +model.manage.init2=创建代码版本; +model.manage.createtrainjob_tip=训练任务:您还没创建过训练任务,请先创建 +model.manage.createtrainjob=训练任务 +model.manage.delete=删除模型 +model.manage.delete_confirm=你确认删除该模型么?此模型一旦删除不可恢复。 +model.manage.select.trainjob=选择训练任务 +model.manage.select.version=选择版本 +model.manage.engine=模型框架 +model.manage.select.engine=选择模型框架 +model.manage.modelfile=模型文件 +model.manage.modellabel=模型标签 +model.manage.modeldesc=模型描述 +model.manage.baseinfo=基本信息 +modelconvert.notcreate=未创建过模型转换任务 +modelconvert.importfirst1=请您先导入 +modelconvert.importfirst2=模型下载 +modelconvert.importfirst3=,然后再对其进行转换。 +modelconvert.download=下载 +modelconvert.taskname=任务名称 +modelconvert.modelname=模型名称 +modelconvert.selectmodel=选择模型 +modelconvert.modelversion=模型版本 +modelconvert.selectversion=选择版本 +modelconvert.selectmodelfile=选择模型文件 +modelconvert.taskstatus=状态 +modelconvert.srcengine=原模型框架 +modelconvert.outputformat=转换后格式 +modelconvert.createtime=创建时间 +modelconvert.inputdataformat=输入数据格式 +modelconvert.inputshape=输入张量形状 +modelconvert.inputshapetip=如:1,1,32,32,与输入数据格式对应。 +modelconvert.netoutputdata=网络输出数据类型 +modelconvert.taskdesc=任务描述 +modelconvert.newtask=新建任务 +modelconvert.createtask=创建模型转换任务 + +modelconvert.taskurlname=模型转换任务 +log_scroll_start=滚动到顶部 +log_scroll_end=滚动到底部 +modelconvert.tasknameempty=请输入任务名称。 +modelconvert.inputshapeerror=格式输入错误,请输入如:1,1,32,32,与输入数据格式对应。 + modelconvert.manage.create_error1=相同的名称模型转换任务已经存在。 modelconvert.manage.create_error2=只能创建一个正在运行的模型转换任务。 modelconvert.manage.model_not_exist=选择的模型不存在。 @@ -3138,7 +3196,7 @@ wrong_specification=您目前不能使用这个资源规格,请选择其他资 job_name_rule = 请输入字母、数字、_和-,最长64个字符,且不能以中划线(-)结尾。 train_dataset_path_rule = 数据集位置存储在环境变量data_url中,训练输出路径存储在环境变量train_url中。 -infer_dataset_path_rule = 数据集位置存储在环境变量data_url中,推理输出路径存储在环境变量train_url中。 +infer_dataset_path_rule = 数据集位置存储在环境变量data_url中,推理输出路径存储在环境变量result_url中。 view_sample = 查看样例 inference_output_path_rule = 推理输出路径存储在环境变量result_url中。 model_file_path_rule = 模型文件位置存储在环境变量ckpt_url中。 @@ -3148,7 +3206,7 @@ delete_task = 删除任务 task_delete_confirm = 你确认删除该任务么?此任务一旦删除不可恢复。 operate_confirm = 确定操作 operate_cancel = 取消操作 - +resource_use=资源占用情况 gpu_num = GPU数 cpu_num = CPU数 @@ 
-3173,3 +3231,4 @@ load_code_failed=代码加载失败,请确认选择了正确的分支。 error.dataset_select = 数据集选择错误:数量超过限制或者有同名数据集 new_train_gpu_tooltips =训练脚本存储在%s中,数据集存储在%s中,训练输出请存储在%s中以供后续下载。 +new_infer_gpu_tooltips = 数据集存储在%s中,模型文件存储在%s中,推理输出请存储在%s中以供后续下载。 \ No newline at end of file diff --git a/routers/home.go b/routers/home.go index 1a697946a..026491156 100755 --- a/routers/home.go +++ b/routers/home.go @@ -296,11 +296,10 @@ func ExploreDatasets(ctx *context.Context) { // ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled var ( - datasets []*models.Dataset - datasetsWithStar []*models.DatasetWithStar - count int64 - err error - orderBy models.SearchOrderBy + datasets []*models.Dataset + count int64 + err error + orderBy models.SearchOrderBy ) page := ctx.QueryInt("page") if page <= 0 { @@ -379,14 +378,6 @@ func ExploreDatasets(ctx *context.Context) { ctx.ServerError("SearchDatasets", err) return } - for _, dataset := range datasets { - if !ctx.IsSigned { - datasetsWithStar = append(datasetsWithStar, &models.DatasetWithStar{Dataset: *dataset, IsStaring: false}) - } else { - datasetsWithStar = append(datasetsWithStar, &models.DatasetWithStar{Dataset: *dataset, IsStaring: models.IsDatasetStaring(ctx.User.ID, dataset.ID)}) - } - - } pager := context.NewPagination(int(count), opts.PageSize, page, 5) ctx.Data["Keyword"] = opts.Keyword @@ -397,7 +388,7 @@ func ExploreDatasets(ctx *context.Context) { pager.SetDefaultParams(ctx) ctx.Data["Page"] = pager - ctx.Data["Datasets"] = datasetsWithStar + ctx.Data["Datasets"] = repository.ConvertToDatasetWithStar(ctx, datasets) ctx.Data["Total"] = count ctx.Data["PageIsDatasets"] = true ctx.HTML(200, tplExploreDataset) diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go index 457f275ed..c1e89dde5 100755 --- a/routers/repo/cloudbrain.go +++ b/routers/repo/cloudbrain.go @@ -2718,7 +2718,7 @@ func getTrainJobCommand(form auth.CreateCloudBrainForm) (string, error) { } } - command += "python /code/" + bootFile + param + " > " + cloudbrain.ModelMountPath + "/" + form.DisplayJobName + "-" + cloudbrain.LogFile + command += "python /code/" + bootFile + param + " | tee " + cloudbrain.ModelMountPath + "/" + form.DisplayJobName + "-" + cloudbrain.LogFile return command, nil } diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go index 4dde646a6..f047cdaa9 100755 --- a/routers/repo/dataset.go +++ b/routers/repo/dataset.go @@ -9,6 +9,8 @@ import ( "strings" "unicode/utf8" + "code.gitea.io/gitea/services/repository" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" @@ -22,6 +24,7 @@ const ( tplDatasetCreate base.TplName = "repo/datasets/create" tplDatasetEdit base.TplName = "repo/datasets/edit" taskstplIndex base.TplName = "repo/datasets/tasks/index" + tplReference base.TplName = "repo/datasets/reference" ) // MustEnableDataset check if repository enable internal dataset @@ -266,6 +269,37 @@ func CreateDatasetPost(ctx *context.Context, form auth.CreateDatasetForm) { ctx.JSON(http.StatusOK, models.BaseOKMessage) } +} +func ReferenceDatasetDelete(ctx *context.Context) { + repoID := ctx.Repo.Repository.ID + datasetId, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64) + + oldDatasetIds := models.GetDatasetIdsByRepoID(repoID) + + var newDatasetIds []int64 + + for _, tempDatasetId := range oldDatasetIds { + if datasetId != tempDatasetId { + newDatasetIds = append(newDatasetIds, tempDatasetId) + } + } + err := models.NewDatasetIdsByRepoID(repoID, newDatasetIds) + if err != nil { + ctx.JSON(http.StatusOK, 
models.BaseErrorMessage(ctx.Tr("dataset.cancel_reference_dataset_fail")))
+		return
+	}
+	ctx.JSON(http.StatusOK, models.BaseOKMessage)
+
+}
+
+func ReferenceDatasetPost(ctx *context.Context, form auth.ReferenceDatasetForm) {
+	repoID := ctx.Repo.Repository.ID
+	err := models.NewDatasetIdsByRepoID(repoID, form.DatasetID)
+	if err != nil {
+		ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.reference_dataset_fail")))
+		return
+	}
+
+	ctx.JSON(http.StatusOK, models.BaseOKMessage)
+}
 
 func EditDatasetPost(ctx *context.Context, form auth.EditDatasetForm) {
@@ -412,18 +446,17 @@ func MyDatasets(ctx *context.Context) {
 
 func datasetMultiple(ctx *context.Context, opts *models.SearchDatasetOptions) {
 	page := ctx.QueryInt("page")
-	cloudbrainType := ctx.QueryInt("type")
 	keyword := strings.Trim(ctx.Query("q"), " ")
-	orderBy := models.SearchOrderByRecentUpdated
 	opts.Keyword = keyword
-	opts.SearchOrderBy = orderBy
+	if opts.SearchOrderBy.String() == "" {
+		opts.SearchOrderBy = models.SearchOrderByRecentUpdated
+	}
+
 	opts.RecommendOnly = ctx.QueryBool("recommend")
-	opts.CloudBrainType = cloudbrainType
 	opts.ListOptions = models.ListOptions{
 		Page:     page,
 		PageSize: setting.UI.DatasetPagingNum,
 	}
-	opts.NeedAttachment = true
 	opts.JustNeedZipFile = true
 	opts.User = ctx.User
@@ -449,22 +482,52 @@
 		"data":  string(data),
 		"count": strconv.FormatInt(count, 10),
 	})
-
 }
 
 func CurrentRepoDatasetMultiple(ctx *context.Context) {
-
+	datasetIds := models.GetDatasetIdsByRepoID(ctx.Repo.Repository.ID)
+	searchOrderBy := getSearchOrderByInValues(datasetIds)
 	opts := &models.SearchDatasetOptions{
-		RepoID: ctx.Repo.Repository.ID,
+		RepoID:         ctx.Repo.Repository.ID,
+		NeedAttachment: true,
+		CloudBrainType: ctx.QueryInt("type"),
+		DatasetIDs:     datasetIds,
+		SearchOrderBy:  searchOrderBy,
 	}
+
 	datasetMultiple(ctx, opts)
 }
 
+func getSearchOrderByInValues(datasetIds []int64) models.SearchOrderBy {
+	if len(datasetIds) == 0 {
+		return ""
+	}
+	searchOrderBy := "CASE id "
+	for i, id := range datasetIds {
+		searchOrderBy += fmt.Sprintf(" WHEN %d THEN %d", id, i+1)
+	}
+	searchOrderBy += " ELSE 0 END"
+	return models.SearchOrderBy(searchOrderBy)
+}
+
 func MyDatasetsMultiple(ctx *context.Context) {
 	opts := &models.SearchDatasetOptions{
 		UploadAttachmentByMe: true,
+		NeedAttachment:       true,
+		CloudBrainType:       ctx.QueryInt("type"),
+	}
+	datasetMultiple(ctx, opts)
+
+}
+
+func ReferenceDatasetAvailable(ctx *context.Context) {
+
+	opts := &models.SearchDatasetOptions{
+		PublicOnly:     true,
+		NeedAttachment: false,
+		CloudBrainType: models.TypeCloudBrainAll,
 	}
 	datasetMultiple(ctx, opts)
@@ -473,7 +536,9 @@
 func PublicDatasetMultiple(ctx *context.Context) {
 
 	opts := &models.SearchDatasetOptions{
-		PublicOnly: true,
+		PublicOnly:     true,
+		NeedAttachment: true,
+		CloudBrainType: ctx.QueryInt("type"),
 	}
 
 	datasetMultiple(ctx, opts)
@@ -482,11 +547,50 @@
 func MyFavoriteDatasetMultiple(ctx *context.Context) {
 	opts := &models.SearchDatasetOptions{
-		StarByMe:   true,
-		DatasetIDs: models.GetDatasetIdsStarByUser(ctx.User.ID),
+		StarByMe:       true,
+		DatasetIDs:     models.GetDatasetIdsStarByUser(ctx.User.ID),
+		NeedAttachment: true,
+		CloudBrainType: ctx.QueryInt("type"),
 	}
 	datasetMultiple(ctx, opts)
 }
 
+func ReferenceDataset(ctx *context.Context) {
+	MustEnableDataset(ctx)
+	ctx.Data["PageIsDataset"] = true
+	ctx.Data["MaxReferenceDatasetNum"] = setting.RepoMaxReferenceDatasetNum
+	ctx.Data["CanWrite"] = 
ctx.Repo.CanWrite(models.UnitTypeDatasets) + ctx.HTML(200, tplReference) + +} + +func ReferenceDatasetData(ctx *context.Context) { + MustEnableDataset(ctx) + datasetIds := models.GetDatasetIdsByRepoID(ctx.Repo.Repository.ID) + var datasets models.DatasetList + var err error + if len(datasetIds) > 0 { + + opts := &models.SearchDatasetOptions{ + DatasetIDs: datasetIds, + NeedAttachment: false, + CloudBrainType: models.TypeCloudBrainAll, + ListOptions: models.ListOptions{ + Page: 1, + PageSize: setting.RepoMaxReferenceDatasetNum, + }, + SearchOrderBy: getSearchOrderByInValues(datasetIds), + QueryReference: true, + } + datasets, _, err = models.SearchDataset(opts) + if err != nil { + ctx.ServerError("SearchDatasets", err) + return + } + } + + ctx.JSON(http.StatusOK, repository.ConvertToDatasetWithStar(ctx, datasets)) + +} func PublicDataset(ctx *context.Context) { page := ctx.QueryInt("page") diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index 25c394efc..40e8076fb 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -2066,13 +2066,6 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference gitRepo, _ := git.OpenRepository(repo.RepoPath()) commitID, _ := gitRepo.GetBranchCommitID(branchName) - _, dataUrl, datasetNames, _, err := getDatasUrlListByUUIDS(uuid) - if err != nil { - inferenceJobErrorNewDataPrepare(ctx, form) - ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsInferenceJobNew, &form) - return - } - if err := downloadCode(repo, codeLocalPath, branchName); err != nil { log.Error("Create task failed, server timed out: %s (%v)", repo.FullName(), err) inferenceJobErrorNewDataPrepare(ctx, form) @@ -2111,6 +2104,28 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference Label: modelarts.CkptUrl, Value: "s3:/" + ckptUrl, }) + + datasUrlList, dataUrl, datasetNames, isMultiDataset, err := getDatasUrlListByUUIDS(uuid) + if err != nil { + inferenceJobErrorNewDataPrepare(ctx, form) + ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsInferenceJobNew, &form) + return + } + dataPath := dataUrl + jsondatas, err := json.Marshal(datasUrlList) + if err != nil { + log.Error("Failed to Marshal: %v", err) + inferenceJobErrorNewDataPrepare(ctx, form) + ctx.RenderWithErr("json error:"+err.Error(), tplModelArtsInferenceJobNew, &form) + return + } + if isMultiDataset { + param = append(param, models.Parameter{ + Label: modelarts.MultiDataUrl, + Value: string(jsondatas), + }) + } + existDeviceTarget := false if len(params) != 0 { err := json.Unmarshal([]byte(params), ¶meters) @@ -2143,7 +2158,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference req := &modelarts.GenerateInferenceJobReq{ JobName: jobName, DisplayJobName: displayJobName, - DataUrl: dataUrl, + DataUrl: dataPath, Description: description, CodeObsPath: codeObsPath, BootFileUrl: codeObsPath + bootFile, diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 41d34b937..9d83594fa 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -676,6 +676,9 @@ func RegisterRoutes(m *macaron.Macaron) { reqRepoIssuesOrPullsReader := context.RequireRepoReaderOr(models.UnitTypeIssues, models.UnitTypePullRequests) reqRepoDatasetReader := context.RequireRepoReader(models.UnitTypeDatasets) reqRepoDatasetWriter := context.RequireRepoWriter(models.UnitTypeDatasets) + reqRepoDatasetReaderJson := context.RequireRepoReaderJson(models.UnitTypeDatasets) + reqRepoDatasetWriterJson := 
context.RequireRepoWriterJson(models.UnitTypeDatasets) + reqRepoCloudBrainReader := context.RequireRepoReader(models.UnitTypeCloudBrain) reqRepoCloudBrainWriter := context.RequireRepoWriter(models.UnitTypeCloudBrain) reqRepoModelManageReader := context.RequireRepoReader(models.UnitTypeModelManage) @@ -1032,10 +1035,14 @@ func RegisterRoutes(m *macaron.Macaron) { m.Group("/datasets", func() { m.Get("", reqRepoDatasetReader, repo.DatasetIndex) + m.Get("/reference_datasets", reqRepoDatasetReader, repo.ReferenceDataset) + m.Get("/reference_datasets_data", reqRepoDatasetReaderJson, repo.ReferenceDatasetData) + m.Delete("/reference_datasets/:id", reqRepoDatasetWriterJson, repo.ReferenceDatasetDelete) m.Put("/:id/:action", reqRepoDatasetReader, repo.DatasetAction) m.Get("/create", reqRepoDatasetWriter, repo.CreateDataset) m.Post("/create", reqRepoDatasetWriter, bindIgnErr(auth.CreateDatasetForm{}), repo.CreateDatasetPost) m.Get("/edit/:id", reqRepoDatasetWriter, repo.EditDataset) + m.Post("/reference_datasets", reqRepoDatasetWriterJson, bindIgnErr(auth.ReferenceDatasetForm{}), repo.ReferenceDatasetPost) m.Post("/edit", reqRepoDatasetWriter, bindIgnErr(auth.EditDatasetForm{}), repo.EditDatasetPost) m.Get("/current_repo", repo.CurrentRepoDataset) m.Get("/my_datasets", repo.MyDatasets) @@ -1045,6 +1052,8 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/current_repo_m", repo.CurrentRepoDatasetMultiple) m.Get("/my_datasets_m", repo.MyDatasetsMultiple) m.Get("/public_datasets_m", repo.PublicDatasetMultiple) + + m.Get("/reference_datasets_available", repo.ReferenceDatasetAvailable) m.Get("/my_favorite_m", repo.MyFavoriteDatasetMultiple) m.Group("/status", func() { @@ -1184,7 +1193,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("", reqRepoCloudBrainReader, repo.NotebookShow) m.Get("/debug", cloudbrain.AdminOrJobCreaterRight, repo.NotebookDebug2) m.Post("/restart", cloudbrain.AdminOrJobCreaterRight, repo.NotebookRestart) - m.Post("/stop", cloudbrain.AdminOrJobCreaterRight, repo.NotebookStop) + m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookStop) m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookDel) }) m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.NotebookNew) diff --git a/services/repository/dataset.go b/services/repository/dataset.go new file mode 100644 index 000000000..ffe3c5466 --- /dev/null +++ b/services/repository/dataset.go @@ -0,0 +1,19 @@ +package repository + +import ( + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" +) + +func ConvertToDatasetWithStar(ctx *context.Context, datasets []*models.Dataset) []*models.DatasetWithStar { + var datasetsWithStar []*models.DatasetWithStar + for _, dataset := range datasets { + if !ctx.IsSigned { + datasetsWithStar = append(datasetsWithStar, &models.DatasetWithStar{Dataset: *dataset, IsStaring: false}) + } else { + datasetsWithStar = append(datasetsWithStar, &models.DatasetWithStar{Dataset: *dataset, IsStaring: models.IsDatasetStaring(ctx.User.ID, dataset.ID)}) + } + + } + return datasetsWithStar +} diff --git a/templates/custom/select_dataset_train.tmpl b/templates/custom/select_dataset_train.tmpl index d7dbb055a..caa8f46f9 100755 --- a/templates/custom/select_dataset_train.tmpl +++ b/templates/custom/select_dataset_train.tmpl @@ -15,7 +15,7 @@ {{if .benchmarkMode}}{{.i18n.Tr "repo.modelarts.infer_job.select_model"}}{{else}}{{.i18n.Tr "dataset.select_dataset"}}{{end}} {{if .benchmarkMode}} - 说明:先使用数据集功能上传模型,然后从数据集列表选模型。 + {{.i18n.Tr 
"dataset.benchmark_dataset_tip"}} {{end}}
@@ -49,13 +49,13 @@ 解压中 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_stared"}} 解压失败 + data-tooltip="{{$.i18n.Tr "dataset.unzip_failed"}}" data-inverted="" + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_failed"}}
@@ -85,13 +85,13 @@ 解压中 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_stared"}} 解压失败 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_failed"}} @@ -120,13 +120,13 @@ 解压中 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_stared"}} 解压失败 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_failed"}} @@ -155,13 +155,13 @@ 解压中 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_stared"}} 解压失败 + data-variation="mini" data-position="left center">{{$.i18n.Tr "dataset.unzip_failed"}} diff --git a/templates/repo/attachment/upload.tmpl b/templates/repo/attachment/upload.tmpl index abda92b07..5161d93a1 100644 --- a/templates/repo/attachment/upload.tmpl +++ b/templates/repo/attachment/upload.tmpl @@ -47,7 +47,8 @@ data-uploading='{{.i18n.Tr "dropzone.uploading"}}' data-failed='{{.i18n.Tr "dropzone.failed"}}' data-repopath='{{AppSubUrl}}{{$.RepoLink}}/datasets' data-cancel='{{.i18n.Tr "cancel"}}' - data-upload='{{.i18n.Tr "dataset.dataset_upload"}}'> + data-upload='{{.i18n.Tr "dataset.dataset_upload"}}' + data-upload-status='{{.i18n.Tr "dataset.dataset_upload_status"}}'>
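Review note (not part of the patch): `models/dataset_reference.go` above persists the linked dataset IDs as one comma-separated string in a TEXT column, and `getSearchOrderByInValues` in `routers/repo/dataset.go` turns that list into a SQL CASE expression so query results come back in the stored order. Below is a minimal, self-contained sketch of that round trip, assuming the same "CASE id" convention; the helper name and sample IDs are illustrative only.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// buildCaseOrderBy mirrors getSearchOrderByInValues: it emits a SQL CASE
// expression that ranks each dataset id by its position in the stored list,
// so ORDER BY preserves the user's chosen sequence.
func buildCaseOrderBy(ids []int64) string {
	if len(ids) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("CASE id ")
	for i, id := range ids {
		fmt.Fprintf(&b, " WHEN %d THEN %d", id, i+1)
	}
	b.WriteString(" ELSE 0 END")
	return b.String()
}

func main() {
	// "12,7,30" stands in for the TEXT payload a dataset_reference row holds.
	var ids []int64
	for _, s := range strings.Split("12,7,30", ",") {
		if v, err := strconv.ParseInt(s, 10, 64); err == nil {
			ids = append(ids, v) // unparsable fragments are skipped, as in GetDatasetIdsByRepoID
		}
	}
	fmt.Println(buildCaseOrderBy(ids))
	// CASE id  WHEN 12 THEN 1 WHEN 7 THEN 2 WHEN 30 THEN 3 ELSE 0 END
}
```

Storing the whole list as one TEXT payload keeps the schema to a single row per repository at the cost of splitting and parsing in Go; the CASE ordering is what keeps the display order stable.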
diff --git a/templates/repo/cloudbrain/benchmark/new.tmpl b/templates/repo/cloudbrain/benchmark/new.tmpl index a60f32853..32e715ab7 100755 --- a/templates/repo/cloudbrain/benchmark/new.tmpl +++ b/templates/repo/cloudbrain/benchmark/new.tmpl @@ -134,7 +134,7 @@ {{else}} -
+ {{.CsrfTokenHtml}} @@ -178,8 +178,7 @@
 
- +
- - - + +
diff --git a/templates/repo/cloudbrain/inference/new.tmpl b/templates/repo/cloudbrain/inference/new.tmpl index e023a5fe7..9932ecf49 100644 --- a/templates/repo/cloudbrain/inference/new.tmpl +++ b/templates/repo/cloudbrain/inference/new.tmpl @@ -83,6 +83,10 @@ Ascend NPU {{template "custom/wait_count_train" Dict "ctx" $ "type" .inference_gpu_types}} +
+ + {{.i18n.Tr "cloudbrain.new_infer_gpu_tooltips" "/dataset" "/model" "/result" | Safe}} +
@@ -229,18 +233,18 @@
 							{{if .resource_spec_id}}
 								{{range .inference_resource_specs}}
 									{{if eq $.resource_spec_id .Id}}
-
+
 									{{end}}
 								{{end}}
 								{{range .inference_resource_specs}}
 									{{if ne $.resource_spec_id .Id}}
-
+
 									{{end}}
 								{{end}}
 							{{else}}
 								{{range .inference_resource_specs}}
+									{{$.i18n.Tr "cloudbrain.gpu_num"}}:{{.GpuNum}},{{$.i18n.Tr "cloudbrain.cpu_num"}}:{{.CpuNum}},{{$.i18n.Tr "cloudbrain.memory"}}(MB):{{.MemMiB}},{{$.i18n.Tr "cloudbrain.shared_memory"}}(MB):{{.ShareMemMiB}}
 								{{end}}
 							{{end}}
@@ -384,17 +388,19 @@
 			let value = ''
 			value += `
` value += '
' + let placeholder_value='{{.i18n.Tr "repo.modelarts.train_job.parameter_value"}}' + let placeholder_name='{{.i18n.Tr "repo.modelarts.train_job.parameter_name"}}' if(flag){ value +=`` }else{ - value +='' + value +='' } value += '
' value += '
' if(flag){ value +=`` }else{ - value +='' + value +='' } value += '
' value += '' diff --git a/templates/repo/cloudbrain/trainjob/new.tmpl b/templates/repo/cloudbrain/trainjob/new.tmpl index 993b04fc5..cf25ae91c 100755 --- a/templates/repo/cloudbrain/trainjob/new.tmpl +++ b/templates/repo/cloudbrain/trainjob/new.tmpl @@ -211,7 +211,7 @@ data-content={{.i18n.Tr "repo.modelarts.train_job.boot_file_helper"}} data-position="right center" data-variation="mini"> - 查看样例 + {{.i18n.Tr "cloudbrain.view_sample"}}
@@ -234,18 +234,18 @@ {{if .resource_spec_id}} {{range .train_resource_specs}} {{if eq $.resource_spec_id .Id}} - + {{end}} {{end}} {{range .train_resource_specs}} {{if ne $.resource_spec_id .Id}} - + {{end}} {{end}} {{else}} {{range .train_resource_specs}} + {{$.i18n.Tr "cloudbrain.gpu_num"}}:{{.GpuNum}},{{$.i18n.Tr "cloudbrain.cpu_num"}}:{{.CpuNum}},{{$.i18n.Tr "cloudbrain.memory"}}(MB):{{.MemMiB}},{{$.i18n.Tr "cloudbrain.shared_memory"}}(MB):{{.ShareMemMiB}} {{end}} {{end}} @@ -297,17 +297,19 @@ let value = '' value += `
` value += '
' + let placeholder_value='{{.i18n.Tr "repo.modelarts.train_job.parameter_value"}}' + let placeholder_name='{{.i18n.Tr "repo.modelarts.train_job.parameter_name"}}' if(flag){ value +=`` }else{ - value +='' + value +='' } value += '
' value += '
' if(flag){ value +=`` }else{ - value +='' + value +='' } value += '
' value += '' diff --git a/templates/repo/cloudbrain/trainjob/show.tmpl b/templates/repo/cloudbrain/trainjob/show.tmpl index ba299a7eb..14967c900 100644 --- a/templates/repo/cloudbrain/trainjob/show.tmpl +++ b/templates/repo/cloudbrain/trainjob/show.tmpl @@ -584,7 +584,7 @@
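Review note (not part of the patch): the `retry`/`goto sendjob` change in `modules/cloudbrain/resty.go` re-authenticates once when the queue API reports an invalid token and then replays the request. A bounded-loop equivalent is sketched below; `callWithRelogin`, `call`, and `login` are hypothetical stand-ins for the resty request and `loginCloudbrain`, not code from this patch.

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidToken = errors.New("invalid token")

// callWithRelogin retries exactly once after re-authenticating, which is
// what the `retry < 1` guard plus `goto sendjob` achieves in GetQueuesDetail.
func callWithRelogin(call func() error, login func() error) error {
	for attempt := 0; attempt < 2; attempt++ {
		err := call()
		if err == nil {
			return nil
		}
		if errors.Is(err, errInvalidToken) && attempt == 0 {
			if lerr := login(); lerr != nil {
				return lerr
			}
			continue // replay the request once with the fresh token
		}
		return err
	}
	return nil
}

func main() {
	token := "expired"
	err := callWithRelogin(
		func() error {
			if token != "fresh" {
				return errInvalidToken
			}
			return nil
		},
		func() error { token = "fresh"; return nil },
	)
	fmt.Println("err:", err) // err: <nil>
}
```

Either shape works; the loop just makes the single-retry bound explicit instead of encoding it in the `retry < 1` guard before the label.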