
servicecontext.go

/*
Copyright (c) [2023] [pcm]
[pcm-coordinator] is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
    http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
*/

package svc

import (
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/go-resty/resty/v2"
	alert "github.com/prometheus/alertmanager/api/v2/client"
	"github.com/robfig/cron/v3"
	"github.com/zeromicro/go-zero/core/logx"
	"github.com/zeromicro/go-zero/zrpc"
	"gitlink.org.cn/JointCloud/pcm-ac/hpcacclient"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/config"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/scheduler"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/scheduler/database"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/scheduler/service"
	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/tracker"
	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/utils"
	"gitlink.org.cn/JointCloud/pcm-coordinator/rpc/client/participantservice"
	"gitlink.org.cn/JointCloud/pcm-kubernetes/kubernetesclient"
	"gitlink.org.cn/JointCloud/pcm-modelarts/client/imagesservice"
	"gitlink.org.cn/JointCloud/pcm-modelarts/client/modelartsservice"
	"gitlink.org.cn/JointCloud/pcm-octopus/octopusclient"
	"gitlink.org.cn/JointCloud/pcm-openstack/openstackclient"
	slurmClient "gitlink.org.cn/JointCloud/pcm-slurm/slurmclient"
	"gitlink.org.cn/jcce-pcm/pcm-participant-ceph/cephclient"
	"gorm.io/driver/mysql"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
	"gorm.io/gorm/schema"
)

// ServiceContext carries the shared dependencies used by the API handlers:
// configuration, RPC clients for the participant clusters, database and cache
// handles, monitoring clients, and the scheduler.
type ServiceContext struct {
	Config          config.Config
	RedisClient     *redis.Client
	Cron            *cron.Cron
	ModelArtsRpc    modelartsservice.ModelArtsService
	ModelArtsImgRpc imagesservice.ImagesService
	DbEngin         *gorm.DB
	ACRpc           hpcacclient.HpcAC
	THRpc           slurmClient.Slurm
	OctopusRpc      octopusclient.Octopus
	CephRpc         cephclient.Ceph
	OpenstackRpc    openstackclient.Openstack
	K8sRpc          kubernetesclient.Kubernetes
	MonitorClient   map[int64]tracker.Prometheus
	ParticipantRpc  participantservice.ParticipantService
	PromClient      tracker.Prometheus
	AlertClient     *alert.AlertmanagerAPI
	HttpClient      *resty.Client
	Scheduler       *scheduler.Scheduler
}

// NewServiceContext builds the service context from the loaded configuration.
func NewServiceContext(c config.Config) *ServiceContext {
	// Prometheus client used for cluster monitoring.
	promClient, err := tracker.NewPrometheus(c.Monitoring.PromUrl)
	if err != nil {
		logx.Errorf("InitPrometheus err: %v", err)
		panic("InitPrometheus err")
	}

	// Shared HTTP client with a short timeout.
	httpClient := resty.New()
	httpClient.SetTimeout(1 * time.Second)

	// Alertmanager client.
	alertClient := tracker.NewAlertClient(c.Monitoring.AlertUrl)

	// Enable snowflake ID generation.
	err = utils.InitSnowflake(c.SnowflakeConf.MachineId)
	if err != nil {
		logx.Errorf("InitSnowflake err: %v", err)
		panic("InitSnowflake err")
	}

	// Initialize GORM.
	dbEngin, err := gorm.Open(mysql.Open(c.DB.DataSource), &gorm.Config{
		NamingStrategy: schema.NamingStrategy{
			SingularTable: true, // use singular table names, e.g. the table for `User` is `user`
		},
		Logger: logger.Default.LogMode(logger.Error),
	})
	if err != nil {
		logx.Errorf("failed to connect to the database, err: %v", err)
		panic(err)
	}

	sqlDB, err := dbEngin.DB()
	if err != nil {
		logx.Error(err.Error())
		return nil
	}
	// SetMaxIdleConns sets the maximum number of connections in the idle connection pool.
	sqlDB.SetMaxIdleConns(10)
	// SetMaxOpenConns sets the maximum number of open connections to the database.
	sqlDB.SetMaxOpenConns(50)
	// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
	sqlDB.SetConnMaxLifetime(time.Hour)

	redisClient := redis.NewClient(&redis.Options{
		Addr:     c.Redis.Host,
		Password: c.Redis.Pass,
	})

	// Scheduler and its storage layer.
	storage := &database.AiStorage{DbEngin: dbEngin}
	aiService, err := service.NewAiService(&c, storage)
	if err != nil {
		logx.Error(err.Error())
		return nil
	}
	scheduler := scheduler.NewSchdlr(aiService, storage)

	return &ServiceContext{
		Cron:            cron.New(cron.WithSeconds()),
		DbEngin:         dbEngin,
		Config:          c,
		RedisClient:     redisClient,
		ModelArtsRpc:    modelartsservice.NewModelArtsService(zrpc.MustNewClient(c.ModelArtsRpcConf)),
		ModelArtsImgRpc: imagesservice.NewImagesService(zrpc.MustNewClient(c.ModelArtsImgRpcConf)),
		CephRpc:         cephclient.NewCeph(zrpc.MustNewClient(c.CephRpcConf)),
		ACRpc:           hpcacclient.NewHpcAC(zrpc.MustNewClient(c.ACRpcConf)),
		OctopusRpc:      octopusclient.NewOctopus(zrpc.MustNewClient(c.OctopusRpcConf)),
		OpenstackRpc:    openstackclient.NewOpenstack(zrpc.MustNewClient(c.OpenstackRpcConf)),
		K8sRpc:          kubernetesclient.NewKubernetes(zrpc.MustNewClient(c.K8sNativeConf)),
		MonitorClient:   make(map[int64]tracker.Prometheus),
		ParticipantRpc:  participantservice.NewParticipantService(zrpc.MustNewClient(c.PcmCoreRpcConf)),
		PromClient:      promClient,
		AlertClient:     alertClient,
		HttpClient:      httpClient,
		Scheduler:       scheduler,
	}
}
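For context, NewServiceContext is normally called once from the go-zero entry point of the API service, which loads the configuration, builds the context, and registers the HTTP handlers. The following is a minimal sketch of such a main, assuming the conventional goctl-generated layout: the config path etc/pcm.yaml, a handler package under api/internal/handler with a RegisterHandlers function, and a rest.RestConf embedded in config.Config are all assumptions not shown in this file.

package main

import (
	"flag"
	"fmt"

	"github.com/zeromicro/go-zero/core/conf"
	"github.com/zeromicro/go-zero/rest"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/config"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/handler" // assumed handler package
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
)

var configFile = flag.String("f", "etc/pcm.yaml", "the config file") // assumed config path

func main() {
	flag.Parse()

	// Load the YAML configuration into config.Config.
	var c config.Config
	conf.MustLoad(*configFile, &c)

	// Build the shared service context; it panics on unrecoverable init errors.
	ctx := svc.NewServiceContext(c)

	// Start the REST server (assumes config.Config embeds rest.RestConf).
	server := rest.MustNewServer(c.RestConf)
	defer server.Stop()

	handler.RegisterHandlers(server, ctx)
	fmt.Printf("Starting server at %s:%d...\n", c.Host, c.Port)
	server.Start()
}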

PCM is positioned as a software stack over the cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.