You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

taskhpcmodel_gen.go 5.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121
  1. // Code generated by goctl. DO NOT EDIT.
  2. package models
  3. import (
  4. "context"
  5. "database/sql"
  6. "fmt"
  7. "strings"
  8. "time"
  9. "github.com/zeromicro/go-zero/core/stores/builder"
  10. "github.com/zeromicro/go-zero/core/stores/sqlc"
  11. "github.com/zeromicro/go-zero/core/stores/sqlx"
  12. "github.com/zeromicro/go-zero/core/stringx"
  13. )
// Column-name metadata derived from the TaskHpc struct's `db` tags; these
// strings are spliced into the SQL built by the model methods below.
var (
	// taskHpcFieldNames lists every `db`-tagged column of TaskHpc, in field order.
	taskHpcFieldNames = builder.RawFieldNames(&TaskHpc{})
	// taskHpcRows is the comma-joined column list used for SELECT.
	taskHpcRows = strings.Join(taskHpcFieldNames, ",")
	// taskHpcRowsExpectAutoSet is the column list for INSERT with the
	// auto-managed columns removed (primary key plus common timestamp
	// column spellings).
	// NOTE(review): this table's actual timestamp columns are `created_time`
	// and `updated_time`, which are NOT in the removal list — so they are
	// written explicitly by Insert/Update. Confirm that matches the schema.
	taskHpcRowsExpectAutoSet = strings.Join(stringx.Remove(taskHpcFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), ",")
	// taskHpcRowsWithPlaceHolder renders the same columns as "col=?,col=?,...,col=?"
	// for the SET clause of UPDATE.
	taskHpcRowsWithPlaceHolder = strings.Join(stringx.Remove(taskHpcFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), "=?,") + "=?"
)
type (
	// taskHpcModel declares the generated CRUD operations for the task_hpc table.
	taskHpcModel interface {
		Insert(ctx context.Context, data *TaskHpc) (sql.Result, error)
		FindOne(ctx context.Context, id int64) (*TaskHpc, error)
		Update(ctx context.Context, data *TaskHpc) error
		Delete(ctx context.Context, id int64) error
	}

	// defaultTaskHpcModel is the sqlx-backed implementation of taskHpcModel.
	defaultTaskHpcModel struct {
		conn  sqlx.SqlConn // database connection
		table string       // backtick-quoted table name ("`task_hpc`")
	}

	// TaskHpc mirrors one row of the task_hpc table. Field order matters:
	// builder.RawFieldNames and the Insert/Update argument lists depend on it.
	TaskHpc struct {
		Id          int64        `db:"id"`          // primary key
		TaskId      int64        `db:"task_id"`     // task ID
		JobId       string       `db:"job_id"`      // job ID (the job's ID in the third-party system)
		ClusterId   int64        `db:"cluster_id"`  // ID of the cluster executing the task
		Name        string       `db:"name"`        // name
		Status      string       `db:"status"`      // status
		CmdScript   string       `db:"cmd_script"`
		StartTime   string       `db:"start_time"`   // start time
		RunningTime int64        `db:"running_time"` // running time
		DerivedEs   string       `db:"derived_es"`
		Cluster     string       `db:"cluster"`
		BlockId     int64        `db:"block_id"`
		AllocNodes  int64        `db:"alloc_nodes"`
		AllocCpu    int64        `db:"alloc_cpu"`
		CardCount   int64        `db:"card_count"` // card count (presumably accelerator cards — confirm)
		Version     string       `db:"version"`
		Account     string       `db:"account"`
		WorkDir     string       `db:"work_dir"` // working directory
		AssocId     int64        `db:"assoc_id"`
		ExitCode    int64        `db:"exit_code"`
		WallTime    string       `db:"wall_time"`  // maximum running time
		Result      string       `db:"result"`     // run result
		DeletedAt   sql.NullTime `db:"deleted_at"` // deletion time
		YamlString  string       `db:"yaml_string"`
		AppType     string       `db:"app_type"`    // application type
		AppName     string       `db:"app_name"`    // application name
		Queue       string       `db:"queue"`       // queue name
		SubmitType  string       `db:"submit_type"` // cmd (command-line mode)
		NNode       string       `db:"n_node"`      // node count (when set, GAP_NODE_STRING must be "")
		StdOutFile  string       `db:"std_out_file"` // work dir/std.err.%j — NOTE(review): original comment says std.err; likely should be std.out.%j
		StdErrFile  string       `db:"std_err_file"` // work dir/std.err.%j
		StdInput    string       `db:"std_input"`
		Environment string       `db:"environment"`
		DeletedFlag int64        `db:"deleted_flag"` // soft-delete flag (0 = no, 1 = yes)
		CreatedBy   int64        `db:"created_by"`   // creator
		CreatedTime time.Time    `db:"created_time"` // creation time
		UpdatedBy   int64        `db:"updated_by"`   // last updater
		UpdatedTime time.Time    `db:"updated_time"` // last update time
	}
)
  72. func newTaskHpcModel(conn sqlx.SqlConn) *defaultTaskHpcModel {
  73. return &defaultTaskHpcModel{
  74. conn: conn,
  75. table: "`task_hpc`",
  76. }
  77. }
  78. func (m *defaultTaskHpcModel) Delete(ctx context.Context, id int64) error {
  79. query := fmt.Sprintf("delete from %s where `id` = ?", m.table)
  80. _, err := m.conn.ExecCtx(ctx, query, id)
  81. return err
  82. }
  83. func (m *defaultTaskHpcModel) FindOne(ctx context.Context, id int64) (*TaskHpc, error) {
  84. query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", taskHpcRows, m.table)
  85. var resp TaskHpc
  86. err := m.conn.QueryRowCtx(ctx, &resp, query, id)
  87. switch err {
  88. case nil:
  89. return &resp, nil
  90. case sqlc.ErrNotFound:
  91. return nil, ErrNotFound
  92. default:
  93. return nil, err
  94. }
  95. }
  96. func (m *defaultTaskHpcModel) Insert(ctx context.Context, data *TaskHpc) (sql.Result, error) {
  97. query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, taskHpcRowsExpectAutoSet)
  98. ret, err := m.conn.ExecCtx(ctx, query, data.TaskId, data.JobId, data.ClusterId, data.Name, data.Status, data.CmdScript, data.StartTime, data.RunningTime, data.DerivedEs, data.Cluster, data.BlockId, data.AllocNodes, data.AllocCpu, data.CardCount, data.Version, data.Account, data.WorkDir, data.AssocId, data.ExitCode, data.WallTime, data.Result, data.DeletedAt, data.YamlString, data.AppType, data.AppName, data.Queue, data.SubmitType, data.NNode, data.StdOutFile, data.StdErrFile, data.StdInput, data.Environment, data.DeletedFlag, data.CreatedBy, data.CreatedTime, data.UpdatedBy, data.UpdatedTime)
  99. return ret, err
  100. }
  101. func (m *defaultTaskHpcModel) Update(ctx context.Context, data *TaskHpc) error {
  102. query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, taskHpcRowsWithPlaceHolder)
  103. _, err := m.conn.ExecCtx(ctx, query, data.TaskId, data.JobId, data.ClusterId, data.Name, data.Status, data.CmdScript, data.StartTime, data.RunningTime, data.DerivedEs, data.Cluster, data.BlockId, data.AllocNodes, data.AllocCpu, data.CardCount, data.Version, data.Account, data.WorkDir, data.AssocId, data.ExitCode, data.WallTime, data.Result, data.DeletedAt, data.YamlString, data.AppType, data.AppName, data.Queue, data.SubmitType, data.NNode, data.StdOutFile, data.StdErrFile, data.StdInput, data.Environment, data.DeletedFlag, data.CreatedBy, data.CreatedTime, data.UpdatedBy, data.UpdatedTime, data.Id)
  104. return err
  105. }
// tableName reports the backtick-quoted table name this model operates on.
func (m *defaultTaskHpcModel) tableName() string {
	return m.table
}

PCM is positioned as a software stack over cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.