You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

vmScheduler.go 5.0 kB

11 months ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151
  1. package schedulers
  2. import (
  3. "context"
  4. "github.com/pkg/errors"
  5. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler"
  6. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/algorithm/providerPricing"
  7. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/database"
  8. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/schedulers/option"
  9. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/strategy"
  10. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/svc"
  11. "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/constants"
  12. "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/models"
  13. "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/response"
  14. "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/tracker"
  15. "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/utils"
  16. "gorm.io/gorm"
  17. )
// VmScheduler schedules virtual-machine tasks onto one or more clusters.
// It embeds *scheduler.Scheduler and carries the per-task context, scheduling
// options, persistence handles, and a monitoring client.
type VmScheduler struct {
	yamlString string             // raw task specification (YAML), as passed to NewVmScheduler
	storage    database.Storage   // provider/resource parameter lookups (used by genTaskAndProviders)
	task       *response.TaskInfo // task being scheduled
	*scheduler.Scheduler          // embedded base scheduler
	option     *option.VmOption   // strategy name, candidate cluster ids, replicas, static weights
	ctx        context.Context    // request-scoped context supplied by the caller
	promClient tracker.Prometheus // Prometheus metrics client
	dbEngin    *gorm.DB           // database handle (cluster-name lookups in AssignTask)
	svcCtx     *svc.ServiceContext // NOTE(review): never populated by NewVmScheduler — confirm intended
}
// VmResult describes the outcome of assigning a VM task to a single cluster.
type VmResult struct {
	TaskId      string // id of the scheduled task
	ClusterId   string // id of the target cluster
	ClusterName string // display name of the target cluster (resolved from t_cluster)
	Strategy    string // name of the strategy that produced this assignment
	Replica     int32  // number of replicas assigned to this cluster
	Msg         string // optional human-readable status message
}
  37. func NewVmScheduler(ctx context.Context, val string, scheduler *scheduler.Scheduler, option *option.VmOption, dbEngin *gorm.DB, promClient tracker.Prometheus) (*VmScheduler, error) {
  38. return &VmScheduler{ctx: ctx, yamlString: val, Scheduler: scheduler, option: option, dbEngin: dbEngin, promClient: promClient}, nil
  39. }
  40. func (vm *VmScheduler) PickOptimalStrategy() (strategy.Strategy, error) {
  41. if len(vm.option.ClusterIds) == 1 {
  42. // TODO database operation Find
  43. return &strategy.SingleAssignment{Cluster: &strategy.AssignedCluster{ClusterId: vm.option.ClusterIds[0], Replicas: 1}}, nil
  44. }
  45. //resources, err := vm.findClustersWithResources()
  46. /* if err != nil {
  47. return nil, err
  48. }*/
  49. //if len(resources) == 1 {
  50. // var cluster strategy.AssignedCluster
  51. // cluster.ClusterId = resources[0].ClusterId
  52. // cluster.Replicas = 1
  53. // return &strategy.SingleAssignment{Cluster: &cluster}, nil
  54. //}
  55. //params := &param.Params{Resources: resources}
  56. switch vm.option.Strategy {
  57. /* case strategy.REPLICATION:
  58. var clusterIds []string
  59. for _, resource := range resources {
  60. clusterIds = append(clusterIds, resource.ClusterId)
  61. }
  62. strategy := strategy.NewReplicationStrategy(clusterIds, 1)
  63. return strategy, nil
  64. case strategy.RESOURCES_PRICING:
  65. strategy := strategy.NewPricingStrategy(&param.ResourcePricingParams{Params: params, Replicas: 1})
  66. return strategy, nil*/
  67. /* case strategy.DYNAMIC_RESOURCES:
  68. strategy := strategy.NewDynamicResourcesStrategy(params.Resources, vm.option, 1)
  69. return strategy, nil*/
  70. case strategy.STATIC_WEIGHT:
  71. //todo resources should match cluster StaticWeightMap
  72. strategy := strategy.NewStaticWeightStrategy(vm.option.StaticWeightMap, 1)
  73. return strategy, nil
  74. case strategy.RANDOM:
  75. strategy := strategy.NewRandomStrategy(vm.option.ClusterIds, vm.option.Replicas)
  76. return strategy, nil
  77. }
  78. return nil, errors.New("no strategy has been chosen")
  79. }
  80. func (v *VmScheduler) GetNewStructForDb(task *response.TaskInfo, resource string, participantId int64) (interface{}, error) {
  81. //TODO implement me
  82. vm := models.Vm{}
  83. utils.Convert(task.Metadata, &vm)
  84. vm.Id = utils.GenSnowflakeID()
  85. vm.TaskId = vm.TaskId
  86. vm.Status = constants.Saved
  87. vm.ParticipantId = participantId
  88. return vm, nil
  89. }
  90. func (vm *VmScheduler) genTaskAndProviders() (*providerPricing.Task, []*providerPricing.Provider, error) {
  91. proParams, err := vm.storage.GetProviderParams()
  92. if err != nil {
  93. return nil, nil, nil
  94. }
  95. var providerList []*providerPricing.Provider
  96. for _, p := range proParams {
  97. provider := providerPricing.NewProvider(p.Participant_id, p.Cpu_avail, p.Mem_avail, p.Disk_avail, 0.0, 0.0, 0.0)
  98. providerList = append(providerList, provider)
  99. }
  100. //replicas := task.Metadata.(map[string]interface{})["spec"].(map[string]interface{})["replicas"].(float64)
  101. //t := algorithm.NewTask(0, int(replicas), 2, 75120000, 301214500, 1200, 2, 6, 2000)
  102. return nil, providerList, nil
  103. }
  104. func (as *VmScheduler) AssignTask(clusters []*strategy.AssignedCluster, mode int) (interface{}, error) {
  105. //TODO implement me
  106. if clusters == nil {
  107. return nil, errors.New("clusters is nil")
  108. }
  109. for i := len(clusters) - 1; i >= 0; i-- {
  110. if clusters[i].Replicas == 0 {
  111. clusters = append(clusters[:i], clusters[i+1:]...)
  112. }
  113. }
  114. if len(clusters) == 0 {
  115. return nil, errors.New("clusters is nil")
  116. }
  117. var results []*VmResult
  118. for _, cluster := range clusters {
  119. cName := ""
  120. as.dbEngin.Table("t_cluster").Select("name").Where("id=?", cluster.ClusterId).Find(&cName)
  121. cr := VmResult{
  122. ClusterId: cluster.ClusterId,
  123. ClusterName: cName,
  124. Replica: cluster.Replicas,
  125. }
  126. cr.ClusterId = cluster.ClusterId
  127. cr.Replica = cluster.Replicas
  128. cr.ClusterName = cName
  129. results = append(results, &cr)
  130. }
  131. return results, nil
  132. }

PCM is positioned as a software stack over cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.