
domainresourcemodel_gen.go 5.5 kB

// Code generated by goctl. DO NOT EDIT.

package models

import (
	"context"
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/zeromicro/go-zero/core/stores/builder"
	"github.com/zeromicro/go-zero/core/stores/cache"
	"github.com/zeromicro/go-zero/core/stores/sqlc"
	"github.com/zeromicro/go-zero/core/stores/sqlx"
	"github.com/zeromicro/go-zero/core/stringx"
)

var (
	domainResourceFieldNames          = builder.RawFieldNames(&DomainResource{})
	domainResourceRows                = strings.Join(domainResourceFieldNames, ",")
	domainResourceRowsExpectAutoSet   = strings.Join(stringx.Remove(domainResourceFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), ",")
	domainResourceRowsWithPlaceHolder = strings.Join(stringx.Remove(domainResourceFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), "=?,") + "=?"

	cachePcmDomainResourceIdPrefix = "cache:pcm:domainResource:id:"
)
type (
	domainResourceModel interface {
		Insert(ctx context.Context, data *DomainResource) (sql.Result, error)
		FindOne(ctx context.Context, id int64) (*DomainResource, error)
		Update(ctx context.Context, data *DomainResource) error
		Delete(ctx context.Context, id int64) error
	}

	defaultDomainResourceModel struct {
		sqlc.CachedConn
		table string
	}

	DomainResource struct {
		Id           int64     `db:"id"`            // id
		DomainId     string    `db:"domain_id"`     // resource domain id
		DomainName   string    `db:"domain_name"`   // resource domain name
		JobCount     int64     `db:"job_count"`     // number of jobs in the resource domain
		DomainSource int64     `db:"domain_source"` // resource domain data source: 0-nudt, 1-Pengcheng
		Stack        string    `db:"stack"`         // technology stack
		ResourceType string    `db:"resource_type"` // resource type
		Cpu          string    `db:"cpu"`           // cpu
		Memory       string    `db:"memory"`        // memory
		Disk         string    `db:"disk"`          // storage
		NodeCount    string    `db:"node_count"`    // node count
		CreateTime   time.Time `db:"create_time"`   // record creation time
		UpdateTime   time.Time `db:"update_time"`   // record update time
		DeleteFlag   int64     `db:"delete_flag"`   // deletion flag: 0-not deleted, 1-deleted
		Description  string    `db:"description"`   // cluster description
		ClusterName  string    `db:"cluster_name"`  // cluster name
	}
)
func newDomainResourceModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) *defaultDomainResourceModel {
	return &defaultDomainResourceModel{
		CachedConn: sqlc.NewConn(conn, c, opts...),
		table:      "`domain_resource`",
	}
}

func (m *defaultDomainResourceModel) Delete(ctx context.Context, id int64) error {
	pcmDomainResourceIdKey := fmt.Sprintf("%s%v", cachePcmDomainResourceIdPrefix, id)
	_, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("delete from %s where `id` = ?", m.table)
		return conn.ExecCtx(ctx, query, id)
	}, pcmDomainResourceIdKey)
	return err
}

func (m *defaultDomainResourceModel) FindOne(ctx context.Context, id int64) (*DomainResource, error) {
	pcmDomainResourceIdKey := fmt.Sprintf("%s%v", cachePcmDomainResourceIdPrefix, id)
	var resp DomainResource
	err := m.QueryRowCtx(ctx, &resp, pcmDomainResourceIdKey, func(ctx context.Context, conn sqlx.SqlConn, v any) error {
		query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", domainResourceRows, m.table)
		return conn.QueryRowCtx(ctx, v, query, id)
	})
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
func (m *defaultDomainResourceModel) Insert(ctx context.Context, data *DomainResource) (sql.Result, error) {
	pcmDomainResourceIdKey := fmt.Sprintf("%s%v", cachePcmDomainResourceIdPrefix, data.Id)
	ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, domainResourceRowsExpectAutoSet)
		return conn.ExecCtx(ctx, query, data.DomainId, data.DomainName, data.JobCount, data.DomainSource, data.Stack, data.ResourceType, data.Cpu, data.Memory, data.Disk, data.NodeCount, data.DeleteFlag, data.Description, data.ClusterName)
	}, pcmDomainResourceIdKey)
	return ret, err
}

func (m *defaultDomainResourceModel) Update(ctx context.Context, data *DomainResource) error {
	pcmDomainResourceIdKey := fmt.Sprintf("%s%v", cachePcmDomainResourceIdPrefix, data.Id)
	_, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, domainResourceRowsWithPlaceHolder)
		return conn.ExecCtx(ctx, query, data.DomainId, data.DomainName, data.JobCount, data.DomainSource, data.Stack, data.ResourceType, data.Cpu, data.Memory, data.Disk, data.NodeCount, data.DeleteFlag, data.Description, data.ClusterName, data.Id)
	}, pcmDomainResourceIdKey)
	return err
}
func (m *defaultDomainResourceModel) formatPrimary(primary any) string {
	return fmt.Sprintf("%s%v", cachePcmDomainResourceIdPrefix, primary)
}

func (m *defaultDomainResourceModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary any) error {
	query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", domainResourceRows, m.table)
	return conn.QueryRowCtx(ctx, v, query, primary)
}

func (m *defaultDomainResourceModel) tableName() string {
	return m.table
}

PCM is positioned as a software stack over cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.