You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

schedulecreatetasklogic.go 12 kB

11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
10 months ago
8 months ago
10 months ago
11 months ago
10 months ago
11 months ago
10 months ago
11 months ago
10 months ago
11 months ago
11 months ago
11 months ago
10 months ago
10 months ago
10 months ago
11 months ago
10 months ago
11 months ago
10 months ago
11 months ago
10 months ago
11 months ago
10 months ago
11 months ago
11 months ago
10 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
11 months ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414
  1. package schedule
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/pkg/errors"
  6. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/common"
  7. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/service/collector"
  8. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/service/utils/task"
  9. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/strategy"
  10. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/storeLink"
  11. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/svc"
  12. "gitlink.org.cn/JointCloud/pcm-coordinator/internal/types"
  13. "gopkg.in/yaml.v3"
  14. "slices"
  15. "strings"
  16. "time"
  17. "github.com/zeromicro/go-zero/core/logx"
  18. )
const (
	// TRAINNING_TASK_REPLICA is the default replica count used when
	// scheduling a training task onto a cluster.
	TRAINNING_TASK_REPLICA = 1
	// TRAINNING_TASK_SUFFIX_LEN is presumably the length of the suffix used
	// to de-duplicate task names — not referenced in this file; confirm at usage site.
	TRAINNING_TASK_SUFFIX_LEN = 10
	// QUERY_RESOURCE_RETRY is how many times cluster resources are queried
	// before the least-load-first strategy gives up.
	QUERY_RESOURCE_RETRY = 3
)
// ClustersWithDataDistributes pairs the clusters assigned by a scheduling
// strategy with the data-distribution entries filtered down to those clusters.
// It is YAML-marshaled and stored as the task payload (see createTask).
type ClustersWithDataDistributes struct {
	Clusters        []*strategy.AssignedCluster
	DataDistributes *types.DataDistribute
}
// ScheduleCreateTaskLogic handles the schedule-and-create-task request:
// it picks target clusters (directly or via a strategy), computes which data
// packages still need distributing, and creates the task record.
type ScheduleCreateTaskLogic struct {
	logx.Logger
	ctx           context.Context
	svcCtx        *svc.ServiceContext
	queryResource *QueryResourcesLogic // used to poll cluster resources for least-load-first scheduling
}
// NewScheduleCreateTaskLogic builds a ScheduleCreateTaskLogic bound to the
// request context and service context, with a request-scoped logger and a
// resource-query helper.
func NewScheduleCreateTaskLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ScheduleCreateTaskLogic {
	return &ScheduleCreateTaskLogic{
		Logger:        logx.WithContext(ctx),
		ctx:           ctx,
		svcCtx:        svcCtx,
		queryResource: NewQueryResourcesLogic(ctx, svcCtx),
	}
}
  42. func generateFilteredDataDistributes(clusters []*strategy.AssignedCluster, distribute types.DataDistribute) *ClustersWithDataDistributes {
  43. var clusterIds []string
  44. for _, c := range clusters {
  45. clusterIds = append(clusterIds, c.ClusterId)
  46. }
  47. clustersWithDataDistributes := &ClustersWithDataDistributes{
  48. Clusters: clusters,
  49. DataDistributes: &types.DataDistribute{
  50. Dataset: make([]*types.DatasetDistribute, 0),
  51. Image: make([]*types.ImageDistribute, 0),
  52. Model: make([]*types.ModelDistribute, 0),
  53. Code: make([]*types.CodeDistribute, 0),
  54. },
  55. }
  56. for _, datasetDistribute := range distribute.Dataset {
  57. dataset := &types.DatasetDistribute{}
  58. dataset.DataName = datasetDistribute.DataName
  59. dataset.PackageID = datasetDistribute.PackageID
  60. clusterScheduledList := make([]*types.ClusterScheduled, 0)
  61. if len(datasetDistribute.Clusters) != 0 {
  62. for _, cluster := range datasetDistribute.Clusters {
  63. if slices.Contains(clusterIds, cluster.ClusterID) {
  64. clusterScheduledList = append(clusterScheduledList, cluster)
  65. }
  66. }
  67. }
  68. dataset.Clusters = clusterScheduledList
  69. clustersWithDataDistributes.DataDistributes.Dataset = append(clustersWithDataDistributes.DataDistributes.Dataset, dataset)
  70. }
  71. for _, imageDistribute := range distribute.Image {
  72. image := &types.ImageDistribute{}
  73. image.DataName = imageDistribute.DataName
  74. image.PackageID = imageDistribute.PackageID
  75. clusterScheduledList := make([]*types.ClusterScheduled, 0)
  76. if len(imageDistribute.Clusters) != 0 {
  77. for _, cluster := range imageDistribute.Clusters {
  78. if slices.Contains(clusterIds, cluster.ClusterID) {
  79. clusterScheduledList = append(clusterScheduledList, cluster)
  80. }
  81. }
  82. }
  83. image.Clusters = clusterScheduledList
  84. clustersWithDataDistributes.DataDistributes.Image = append(clustersWithDataDistributes.DataDistributes.Image, image)
  85. }
  86. for _, codeDistribute := range distribute.Code {
  87. code := &types.CodeDistribute{}
  88. code.DataName = codeDistribute.DataName
  89. code.PackageID = codeDistribute.PackageID
  90. code.Output = codeDistribute.Output
  91. clusterScheduledList := make([]*types.ClusterScheduled, 0)
  92. if len(codeDistribute.Clusters) != 0 {
  93. for _, cluster := range codeDistribute.Clusters {
  94. if slices.Contains(clusterIds, cluster.ClusterID) {
  95. clusterScheduledList = append(clusterScheduledList, cluster)
  96. }
  97. }
  98. }
  99. code.Clusters = clusterScheduledList
  100. clustersWithDataDistributes.DataDistributes.Code = append(clustersWithDataDistributes.DataDistributes.Code, code)
  101. }
  102. for _, modelDistribute := range distribute.Model {
  103. model := &types.ModelDistribute{}
  104. model.DataName = modelDistribute.DataName
  105. model.PackageID = modelDistribute.PackageID
  106. clusterScheduledList := make([]*types.ClusterScheduled, 0)
  107. if len(modelDistribute.Clusters) != 0 {
  108. for _, cluster := range modelDistribute.Clusters {
  109. if slices.Contains(clusterIds, cluster.ClusterID) {
  110. clusterScheduledList = append(clusterScheduledList, cluster)
  111. }
  112. }
  113. }
  114. model.Clusters = clusterScheduledList
  115. clustersWithDataDistributes.DataDistributes.Model = append(clustersWithDataDistributes.DataDistributes.Model, model)
  116. }
  117. return clustersWithDataDistributes
  118. }
  119. func (l *ScheduleCreateTaskLogic) ScheduleCreateTask(req *types.CreateTaskReq) (resp *types.CreateTaskResp, err error) {
  120. resp = &types.CreateTaskResp{}
  121. err = task.ValidateJobResources(req.JobResources, "training")
  122. if err != nil {
  123. return nil, err
  124. }
  125. taskName, err := l.svcCtx.Scheduler.AiService.HandleDuplicateTaskName(req.Name)
  126. if err != nil {
  127. return nil, err
  128. }
  129. var clusters []string
  130. if len(req.JobResources.Clusters) == 1 {
  131. clusters = append(clusters, req.JobResources.Clusters[0].ClusterID)
  132. schedatas, err := l.generateScheduleResult(req.DataDistributes, clusters)
  133. if err != nil {
  134. return nil, err
  135. }
  136. assignedClusters := task.CopyParams([]*strategy.AssignedCluster{{
  137. ClusterId: req.JobResources.Clusters[0].ClusterID, Replicas: 1,
  138. }}, req.JobResources.Clusters, "")
  139. // filter data distribution
  140. clustersWithDataDistributes := generateFilteredDataDistributes(assignedClusters, req.DataDistributes)
  141. taskId, err := l.createTask(taskName, req.Description, req.JobResources.ScheduleStrategy, clustersWithDataDistributes, req.Token, req.UserIp)
  142. if err != nil {
  143. return nil, err
  144. }
  145. resp.ScheduleDatas = schedatas
  146. resp.TaskID = taskId
  147. return resp, nil
  148. } else {
  149. assignedClusters, err := l.getAssignedClustersByStrategy(&req.JobResources, &req.DataDistributes)
  150. if err != nil {
  151. return nil, err
  152. }
  153. if len(assignedClusters) == 0 {
  154. return nil, fmt.Errorf("failed to create task, no scheduled cluster found")
  155. }
  156. for _, c := range assignedClusters {
  157. clusters = append(clusters, c.ClusterId)
  158. }
  159. schedatas, err := l.generateScheduleResult(req.DataDistributes, clusters)
  160. if err != nil {
  161. return nil, err
  162. }
  163. // filter data distribution
  164. clustersWithDataDistributes := generateFilteredDataDistributes(assignedClusters, req.DataDistributes)
  165. taskId, err := l.createTask(taskName, req.Description, req.JobResources.ScheduleStrategy, clustersWithDataDistributes, req.Token, req.UserIp)
  166. if err != nil {
  167. return nil, err
  168. }
  169. resp.ScheduleDatas = schedatas
  170. resp.TaskID = taskId
  171. return resp, nil
  172. }
  173. }
  174. func (l *ScheduleCreateTaskLogic) getAssignedClustersByStrategy(resources *types.JobResources, dataDistribute *types.DataDistribute) ([]*strategy.AssignedCluster, error) {
  175. var assignedClusters []*strategy.AssignedCluster
  176. switch resources.ScheduleStrategy {
  177. case strategy.LEASTLOADFIRST:
  178. var resSpecs []*collector.ResourceSpec
  179. var resCount int
  180. for i := 0; i < QUERY_RESOURCE_RETRY; i++ {
  181. defer time.Sleep(time.Second)
  182. qResources, err := l.queryResource.QueryResourcesByClusterId(nil)
  183. if err != nil {
  184. continue
  185. }
  186. for _, resource := range qResources {
  187. if resource.Resources != nil {
  188. resCount++
  189. }
  190. }
  191. if resCount >= 1 {
  192. resSpecs = qResources
  193. break
  194. } else {
  195. resCount = 0
  196. continue
  197. }
  198. }
  199. if resCount == 0 {
  200. return nil, fmt.Errorf("failed to create task, resources counting fails")
  201. }
  202. strtg := strategy.NewLeastLoadFirst(TRAINNING_TASK_REPLICA, resSpecs)
  203. clusters, err := strtg.Schedule()
  204. if err != nil {
  205. return nil, err
  206. }
  207. assignedClusters = task.CopyParams(clusters, resources.Clusters, "")
  208. case strategy.DATA_LOCALITY:
  209. strtg := strategy.NewDataLocality(TRAINNING_TASK_REPLICA, dataDistribute)
  210. clusters, err := strtg.Schedule()
  211. if err != nil {
  212. return nil, err
  213. }
  214. assignedClusters = task.CopyParams(clusters, resources.Clusters, "")
  215. default:
  216. return nil, errors.New("no strategy has been chosen")
  217. }
  218. return assignedClusters, nil
  219. }
  220. func (l *ScheduleCreateTaskLogic) createTask(taskName string, desc string, strategyName string, clustersWithDataDistributes *ClustersWithDataDistributes, token string, userIp string) (int64, error) {
  221. var synergyStatus int64
  222. if len(clustersWithDataDistributes.Clusters) > 1 {
  223. synergyStatus = 1
  224. }
  225. y, err := yaml.Marshal(clustersWithDataDistributes)
  226. if err != nil {
  227. fmt.Printf("Error while Marshaling. %v", err)
  228. }
  229. taskId, err := l.svcCtx.Scheduler.CreateTask(taskName, desc, synergyStatus, strategyName, string(y), token, userIp, &l.svcCtx.Config)
  230. if err != nil {
  231. return 0, err
  232. }
  233. return taskId, nil
  234. }
  235. func (l *ScheduleCreateTaskLogic) generateScheduleResult(distribute types.DataDistribute, clusters []string) ([]*types.ScheduleData, error) {
  236. var schedatas []*types.ScheduleData
  237. for _, d := range distribute.Dataset {
  238. data := &types.ScheduleData{
  239. DataType: "dataset",
  240. PackageID: d.PackageID,
  241. ClusterIDs: make([]string, 0),
  242. }
  243. var cSlc []string
  244. for _, cluster := range d.Clusters {
  245. cSlc = append(cSlc, cluster.ClusterID)
  246. }
  247. for _, cluster := range clusters {
  248. if !slices.Contains(cSlc, cluster) {
  249. data.ClusterIDs = append(data.ClusterIDs, cluster)
  250. } else {
  251. continue
  252. }
  253. }
  254. if len(data.ClusterIDs) != 0 {
  255. schedatas = append(schedatas, data)
  256. }
  257. }
  258. for _, d := range distribute.Code {
  259. data := &types.ScheduleData{
  260. DataType: "code",
  261. PackageID: d.PackageID,
  262. ClusterIDs: make([]string, 0),
  263. }
  264. var cSlc []string
  265. for _, cluster := range d.Clusters {
  266. cSlc = append(cSlc, cluster.ClusterID)
  267. }
  268. for _, cluster := range clusters {
  269. if !slices.Contains(cSlc, cluster) {
  270. data.ClusterIDs = append(data.ClusterIDs, cluster)
  271. } else {
  272. continue
  273. }
  274. }
  275. if len(data.ClusterIDs) != 0 {
  276. schedatas = append(schedatas, data)
  277. }
  278. }
  279. for _, d := range distribute.Image {
  280. data := &types.ScheduleData{
  281. DataType: "image",
  282. PackageID: d.PackageID,
  283. ClusterIDs: make([]string, 0),
  284. }
  285. var cSlc []string
  286. for _, cluster := range d.Clusters {
  287. cSlc = append(cSlc, cluster.ClusterID)
  288. }
  289. for _, cluster := range clusters {
  290. if !slices.Contains(cSlc, cluster) {
  291. data.ClusterIDs = append(data.ClusterIDs, cluster)
  292. } else {
  293. continue
  294. }
  295. }
  296. if len(data.ClusterIDs) != 0 {
  297. schedatas = append(schedatas, data)
  298. }
  299. }
  300. for _, d := range distribute.Model {
  301. data := &types.ScheduleData{
  302. DataType: "model",
  303. PackageID: d.PackageID,
  304. ClusterIDs: make([]string, 0),
  305. }
  306. var cSlc []string
  307. for _, cluster := range d.Clusters {
  308. cSlc = append(cSlc, cluster.ClusterID)
  309. }
  310. for _, cluster := range clusters {
  311. if !slices.Contains(cSlc, cluster) {
  312. data.ClusterIDs = append(data.ClusterIDs, cluster)
  313. } else {
  314. continue
  315. }
  316. }
  317. if len(data.ClusterIDs) != 0 {
  318. schedatas = append(schedatas, data)
  319. }
  320. }
  321. if len(schedatas) != 0 {
  322. err := l.updateStorageType(&schedatas)
  323. if err != nil {
  324. return nil, err
  325. }
  326. }
  327. return schedatas, nil
  328. }
  329. func (l *ScheduleCreateTaskLogic) updateStorageType(schedatas *[]*types.ScheduleData) error {
  330. for _, s := range *schedatas {
  331. var storageType string
  332. var sTypes []string
  333. for _, id := range s.ClusterIDs {
  334. cluster, err := l.svcCtx.Scheduler.AiStorages.GetClustersById(id)
  335. if err != nil {
  336. return err
  337. }
  338. stype, ok := storeLink.StorageTypeMap[strings.Title(cluster.Name)]
  339. if ok {
  340. sTypes = append(sTypes, stype)
  341. }
  342. }
  343. sTypes = common.Unique(sTypes)
  344. for _, st := range sTypes {
  345. storageType += st + storeLink.COMMA
  346. }
  347. storageType = strings.TrimSuffix(storageType, storeLink.COMMA)
  348. s.StorageType = storageType
  349. }
  350. return nil
  351. }

PCM is positioned as a software stack over the cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.