
tclustertaskqueuemodel_gen.go 3.4 kB

// Code generated by goctl. DO NOT EDIT.

package models

import (
	"context"
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/zeromicro/go-zero/core/stores/builder"
	"github.com/zeromicro/go-zero/core/stores/sqlc"
	"github.com/zeromicro/go-zero/core/stores/sqlx"
	"github.com/zeromicro/go-zero/core/stringx"
)

var (
	tClusterTaskQueueFieldNames          = builder.RawFieldNames(&TClusterTaskQueue{})
	tClusterTaskQueueRows                = strings.Join(tClusterTaskQueueFieldNames, ",")
	tClusterTaskQueueRowsExpectAutoSet   = strings.Join(stringx.Remove(tClusterTaskQueueFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), ",")
	tClusterTaskQueueRowsWithPlaceHolder = strings.Join(stringx.Remove(tClusterTaskQueueFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), "=?,") + "=?"
)

type (
	tClusterTaskQueueModel interface {
		Insert(ctx context.Context, data *TClusterTaskQueue) (sql.Result, error)
		FindOne(ctx context.Context, id int64) (*TClusterTaskQueue, error)
		Update(ctx context.Context, data *TClusterTaskQueue) error
		Delete(ctx context.Context, id int64) error
	}

	defaultTClusterTaskQueueModel struct {
		conn  sqlx.SqlConn
		table string
	}

	TClusterTaskQueue struct {
		Id        int64     `db:"id"`         // id
		AdapterId int64     `db:"adapter_id"` // adapter id
		ClusterId int64     `db:"cluster_id"` // cluster id
		QueueNum  int64     `db:"queue_num"`  // number of queued tasks
		Date      time.Time `db:"date"`
	}
)

func newTClusterTaskQueueModel(conn sqlx.SqlConn) *defaultTClusterTaskQueueModel {
	return &defaultTClusterTaskQueueModel{
		conn:  conn,
		table: "`t_cluster_task_queue`",
	}
}

func (m *defaultTClusterTaskQueueModel) withSession(session sqlx.Session) *defaultTClusterTaskQueueModel {
	return &defaultTClusterTaskQueueModel{
		conn:  sqlx.NewSqlConnFromSession(session),
		table: "`t_cluster_task_queue`",
	}
}

func (m *defaultTClusterTaskQueueModel) Delete(ctx context.Context, id int64) error {
	query := fmt.Sprintf("delete from %s where `id` = ?", m.table)
	_, err := m.conn.ExecCtx(ctx, query, id)
	return err
}

func (m *defaultTClusterTaskQueueModel) FindOne(ctx context.Context, id int64) (*TClusterTaskQueue, error) {
	query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", tClusterTaskQueueRows, m.table)
	var resp TClusterTaskQueue
	err := m.conn.QueryRowCtx(ctx, &resp, query, id)
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		return nil, ErrNotFound
	default:
		return nil, err
	}
}

func (m *defaultTClusterTaskQueueModel) Insert(ctx context.Context, data *TClusterTaskQueue) (sql.Result, error) {
	query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?)", m.table, tClusterTaskQueueRowsExpectAutoSet)
	ret, err := m.conn.ExecCtx(ctx, query, data.AdapterId, data.ClusterId, data.QueueNum, data.Date)
	return ret, err
}

func (m *defaultTClusterTaskQueueModel) Update(ctx context.Context, data *TClusterTaskQueue) error {
	query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, tClusterTaskQueueRowsWithPlaceHolder)
	_, err := m.conn.ExecCtx(ctx, query, data.AdapterId, data.ClusterId, data.QueueNum, data.Date, data.Id)
	return err
}

func (m *defaultTClusterTaskQueueModel) tableName() string {
	return m.table
}
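For context, a minimal sketch of how this generated model might be driven follows. It is not part of the generated file: because the constructor and interface above are unexported, the sketch assumes it sits in the same models package, and the MySQL DSN, the field values, and the exampleUsage helper are placeholder assumptions rather than anything defined by PCM.

// usage_sketch.go — illustrative only, not generated by goctl.
package models

import (
	"context"
	"time"

	"github.com/zeromicro/go-zero/core/stores/sqlx"
)

func exampleUsage() error {
	// Open a MySQL connection; the DSN here is a placeholder.
	conn := sqlx.NewMysql("user:password@tcp(127.0.0.1:3306)/pcm?parseTime=true")
	model := newTClusterTaskQueueModel(conn)

	ctx := context.Background()

	// Record one queue-length sample for a cluster.
	ret, err := model.Insert(ctx, &TClusterTaskQueue{
		AdapterId: 1,
		ClusterId: 42,
		QueueNum:  7,
		Date:      time.Now(),
	})
	if err != nil {
		return err
	}

	// Read the row back by its auto-generated primary key.
	id, err := ret.LastInsertId()
	if err != nil {
		return err
	}
	row, err := model.FindOne(ctx, id)
	if err != nil {
		return err
	}

	// Bump the recorded queue length and persist the change.
	row.QueueNum++
	return model.Update(ctx, row)
}

Note that Insert deliberately omits the id and timestamp columns (see tClusterTaskQueueRowsExpectAutoSet above), so the database is expected to populate them automatically.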

PCM is positioned as a software stack over the cloud, aiming to build the standards and ecosystem for heterogeneous cloud collaboration in JCC in a non-intrusive, autonomous, peer-to-peer manner.