
schedulecreatetasklogic.go 12 kB

package schedule

import (
	"context"
	"fmt"
	"slices"
	"strings"
	"time"

	"github.com/pkg/errors"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/common"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/service/collector"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/service/utils/task"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/scheduler/strategy"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/storeLink"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/svc"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/types"
	"gopkg.in/yaml.v3"

	"github.com/zeromicro/go-zero/core/logx"
)

const (
	TRAINNING_TASK_REPLICA    = 1
	TRAINNING_TASK_SUFFIX_LEN = 10
	QUERY_RESOURCE_RETRY      = 3
)
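// ClustersWithDataDistributes bundles the clusters assigned by the scheduler with
// the data-distribution entries that apply to those clusters.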
type ClustersWithDataDistributes struct {
	Clusters        []*strategy.AssignedCluster
	DataDistributes *types.DataDistribute
}
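// ScheduleCreateTaskLogic creates a training task and schedules it onto one or more
// clusters according to the requested scheduling strategy.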
type ScheduleCreateTaskLogic struct {
	logx.Logger
	ctx           context.Context
	svcCtx        *svc.ServiceContext
	queryResource *QueryResourcesLogic
}

func NewScheduleCreateTaskLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ScheduleCreateTaskLogic {
	return &ScheduleCreateTaskLogic{
		Logger:        logx.WithContext(ctx),
		ctx:           ctx,
		svcCtx:        svcCtx,
		queryResource: NewQueryResourcesLogic(ctx, svcCtx),
	}
}
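// generateFilteredDataDistributes keeps, for each dataset, image, code and model entry,
// only the clusters that appear in the assigned-cluster list, so the stored task record
// reflects the data distribution of the clusters that were actually scheduled.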
func generateFilteredDataDistributes(clusters []*strategy.AssignedCluster, distribute types.DataDistribute) *ClustersWithDataDistributes {
	var clusterIds []string
	for _, c := range clusters {
		clusterIds = append(clusterIds, c.ClusterId)
	}
	clustersWithDataDistributes := &ClustersWithDataDistributes{
		Clusters: clusters,
		DataDistributes: &types.DataDistribute{
			Dataset: make([]*types.DatasetDistribute, 0),
			Image:   make([]*types.ImageDistribute, 0),
			Model:   make([]*types.ModelDistribute, 0),
			Code:    make([]*types.CodeDistribute, 0),
		},
	}
	for _, datasetDistribute := range distribute.Dataset {
		dataset := &types.DatasetDistribute{}
		dataset.DataName = datasetDistribute.DataName
		dataset.PackageID = datasetDistribute.PackageID
		clusterScheduledList := make([]*types.ClusterScheduled, 0)
		if len(datasetDistribute.Clusters) != 0 {
			for _, cluster := range datasetDistribute.Clusters {
				if slices.Contains(clusterIds, cluster.ClusterID) {
					clusterScheduledList = append(clusterScheduledList, cluster)
				}
			}
		}
		dataset.Clusters = clusterScheduledList
		clustersWithDataDistributes.DataDistributes.Dataset = append(clustersWithDataDistributes.DataDistributes.Dataset, dataset)
	}
	for _, imageDistribute := range distribute.Image {
		image := &types.ImageDistribute{}
		image.DataName = imageDistribute.DataName
		image.PackageID = imageDistribute.PackageID
		clusterScheduledList := make([]*types.ClusterScheduled, 0)
		if len(imageDistribute.Clusters) != 0 {
			for _, cluster := range imageDistribute.Clusters {
				if slices.Contains(clusterIds, cluster.ClusterID) {
					clusterScheduledList = append(clusterScheduledList, cluster)
				}
			}
		}
		image.Clusters = clusterScheduledList
		clustersWithDataDistributes.DataDistributes.Image = append(clustersWithDataDistributes.DataDistributes.Image, image)
	}
	for _, codeDistribute := range distribute.Code {
		code := &types.CodeDistribute{}
		code.DataName = codeDistribute.DataName
		code.PackageID = codeDistribute.PackageID
		code.Output = codeDistribute.Output
		clusterScheduledList := make([]*types.ClusterScheduled, 0)
		if len(codeDistribute.Clusters) != 0 {
			for _, cluster := range codeDistribute.Clusters {
				if slices.Contains(clusterIds, cluster.ClusterID) {
					clusterScheduledList = append(clusterScheduledList, cluster)
				}
			}
		}
		code.Clusters = clusterScheduledList
		clustersWithDataDistributes.DataDistributes.Code = append(clustersWithDataDistributes.DataDistributes.Code, code)
	}
	for _, modelDistribute := range distribute.Model {
		model := &types.ModelDistribute{}
		model.DataName = modelDistribute.DataName
		model.PackageID = modelDistribute.PackageID
		clusterScheduledList := make([]*types.ClusterScheduled, 0)
		if len(modelDistribute.Clusters) != 0 {
			for _, cluster := range modelDistribute.Clusters {
				if slices.Contains(clusterIds, cluster.ClusterID) {
					clusterScheduledList = append(clusterScheduledList, cluster)
				}
			}
		}
		model.Clusters = clusterScheduledList
		clustersWithDataDistributes.DataDistributes.Model = append(clustersWithDataDistributes.DataDistributes.Model, model)
	}
	return clustersWithDataDistributes
}
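// ScheduleCreateTask validates the request, resolves a unique task name, selects the
// target cluster(s) either directly (single requested cluster) or via a scheduling
// strategy, and persists the task together with its filtered data distribution.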
func (l *ScheduleCreateTaskLogic) ScheduleCreateTask(req *types.CreateTaskReq) (resp *types.CreateTaskResp, err error) {
	resp = &types.CreateTaskResp{}
	err = task.ValidateJobResources(req.JobResources, "training")
	if err != nil {
		return nil, err
	}
	taskName, err := l.svcCtx.Scheduler.AiService.HandleDuplicateTaskName(req.Name, "training")
	if err != nil {
		return nil, err
	}
	var clusters []string
	if len(req.JobResources.Clusters) == 1 {
		// a single requested cluster is assigned directly, without running a scheduling strategy
		clusters = append(clusters, req.JobResources.Clusters[0].ClusterID)
		schedatas, err := l.generateScheduleResult(req.DataDistributes, clusters)
		if err != nil {
			return nil, err
		}
		assignedClusters := task.CopyParams([]*strategy.AssignedCluster{{
			ClusterId: req.JobResources.Clusters[0].ClusterID, Replicas: 1,
		}}, req.JobResources.Clusters, "")
		// filter data distribution
		clustersWithDataDistributes := generateFilteredDataDistributes(assignedClusters, req.DataDistributes)
		taskId, err := l.createTask(taskName, req.Description, req.UserId, req.JobResources.ScheduleStrategy, clustersWithDataDistributes, req.Token, req.UserIp, req.UserName)
		if err != nil {
			return nil, err
		}
		resp.ScheduleDatas = schedatas
		resp.TaskID = taskId
		resp.TaskName = taskName
		return resp, nil
	} else {
		// multiple candidate clusters: let the configured strategy pick the assignment
		assignedClusters, err := l.getAssignedClustersByStrategy(&req.JobResources, &req.DataDistributes)
		if err != nil {
			return nil, err
		}
		if len(assignedClusters) == 0 {
			return nil, fmt.Errorf("failed to create task, no scheduled cluster found")
		}
		for _, c := range assignedClusters {
			clusters = append(clusters, c.ClusterId)
		}
		schedatas, err := l.generateScheduleResult(req.DataDistributes, clusters)
		if err != nil {
			return nil, err
		}
		// filter data distribution
		clustersWithDataDistributes := generateFilteredDataDistributes(assignedClusters, req.DataDistributes)
		taskId, err := l.createTask(taskName, req.Description, req.UserId, req.JobResources.ScheduleStrategy, clustersWithDataDistributes, req.Token, req.UserIp, req.UserName)
		if err != nil {
			return nil, err
		}
		resp.ScheduleDatas = schedatas
		resp.TaskID = taskId
		resp.TaskName = taskName
		return resp, nil
	}
}
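// getAssignedClustersByStrategy picks target clusters according to the requested
// strategy: least-load-first queries cluster resources (with retries) before
// scheduling, while data-locality schedules based on where the data already resides.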
func (l *ScheduleCreateTaskLogic) getAssignedClustersByStrategy(resources *types.JobResources, dataDistribute *types.DataDistribute) ([]*strategy.AssignedCluster, error) {
	var assignedClusters []*strategy.AssignedCluster
	switch resources.ScheduleStrategy {
	case strategy.LEASTLOADFIRST:
		// query cluster resources (with retries) and schedule onto the least-loaded clusters
		var resSpecs []*collector.ResourceSpec
		var resCount int
		for i := 0; i < QUERY_RESOURCE_RETRY; i++ {
			// note: the deferred sleeps run when this function returns, not between retries
			defer time.Sleep(time.Second)
			qResources, err := l.queryResource.QueryResourcesByClusterId(nil, "Train")
			if err != nil {
				continue
			}
			for _, resource := range qResources {
				if resource.Resources != nil {
					resCount++
				}
			}
			if resCount >= 1 {
				resSpecs = qResources
				break
			} else {
				resCount = 0
				continue
			}
		}
		if resCount == 0 {
			return nil, fmt.Errorf("failed to create task, resources counting fails")
		}
		strtg := strategy.NewLeastLoadFirst(TRAINNING_TASK_REPLICA, resSpecs)
		clusters, err := strtg.Schedule()
		if err != nil {
			return nil, err
		}
		assignedClusters = task.CopyParams(clusters, resources.Clusters, "")
	case strategy.DATA_LOCALITY:
		// schedule onto clusters according to where the task's data already resides
		strtg := strategy.NewDataLocality(TRAINNING_TASK_REPLICA, dataDistribute)
		clusters, err := strtg.Schedule()
		if err != nil {
			return nil, err
		}
		assignedClusters = task.CopyParams(clusters, resources.Clusters, "")
	default:
		return nil, errors.New("no strategy has been chosen")
	}
	return assignedClusters, nil
}
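// createTask serializes the assigned clusters and their data distribution to YAML and
// stores the task through the scheduler; synergyStatus is set when more than one
// cluster is involved.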
func (l *ScheduleCreateTaskLogic) createTask(taskName string, desc string, userId int64, strategyName string, clustersWithDataDistributes *ClustersWithDataDistributes, token string, userIp string, userName string) (int64, error) {
	var synergyStatus int64
	if len(clustersWithDataDistributes.Clusters) > 1 {
		synergyStatus = 1
	}
	y, err := yaml.Marshal(clustersWithDataDistributes)
	if err != nil {
		// a marshal failure is only logged; task creation proceeds with an empty YAML payload
		fmt.Printf("Error while Marshaling. %v", err)
	}
	taskId, err := l.svcCtx.Scheduler.CreateTask(taskName, desc, userId, synergyStatus, strategyName, string(y), token, userIp, &l.svcCtx.Config, userName)
	if err != nil {
		return 0, err
	}
	return taskId, nil
}
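// generateScheduleResult lists, per data package, the scheduled clusters that do not
// yet hold that package (i.e. the transfers still required) and annotates each entry
// with the storage types of those clusters.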
func (l *ScheduleCreateTaskLogic) generateScheduleResult(distribute types.DataDistribute, clusters []string) ([]*types.ScheduleData, error) {
	var schedatas []*types.ScheduleData
	for _, d := range distribute.Dataset {
		data := &types.ScheduleData{
			DataType:   "dataset",
			PackageID:  d.PackageID,
			ClusterIDs: make([]string, 0),
		}
		var cSlc []string
		for _, cluster := range d.Clusters {
			cSlc = append(cSlc, cluster.ClusterID)
		}
		for _, cluster := range clusters {
			if !slices.Contains(cSlc, cluster) {
				data.ClusterIDs = append(data.ClusterIDs, cluster)
			} else {
				continue
			}
		}
		if len(data.ClusterIDs) != 0 {
			schedatas = append(schedatas, data)
		}
	}
	for _, d := range distribute.Code {
		data := &types.ScheduleData{
			DataType:   "code",
			PackageID:  d.PackageID,
			ClusterIDs: make([]string, 0),
		}
		var cSlc []string
		for _, cluster := range d.Clusters {
			cSlc = append(cSlc, cluster.ClusterID)
		}
		for _, cluster := range clusters {
			if !slices.Contains(cSlc, cluster) {
				data.ClusterIDs = append(data.ClusterIDs, cluster)
			} else {
				continue
			}
		}
		if len(data.ClusterIDs) != 0 {
			schedatas = append(schedatas, data)
		}
	}
	for _, d := range distribute.Image {
		data := &types.ScheduleData{
			DataType:   "image",
			PackageID:  d.PackageID,
			ClusterIDs: make([]string, 0),
		}
		var cSlc []string
		for _, cluster := range d.Clusters {
			cSlc = append(cSlc, cluster.ClusterID)
		}
		for _, cluster := range clusters {
			if !slices.Contains(cSlc, cluster) {
				data.ClusterIDs = append(data.ClusterIDs, cluster)
			} else {
				continue
			}
		}
		if len(data.ClusterIDs) != 0 {
			schedatas = append(schedatas, data)
		}
	}
	for _, d := range distribute.Model {
		data := &types.ScheduleData{
			DataType:   "model",
			PackageID:  d.PackageID,
			ClusterIDs: make([]string, 0),
		}
		var cSlc []string
		for _, cluster := range d.Clusters {
			cSlc = append(cSlc, cluster.ClusterID)
		}
		for _, cluster := range clusters {
			if !slices.Contains(cSlc, cluster) {
				data.ClusterIDs = append(data.ClusterIDs, cluster)
			} else {
				continue
			}
		}
		if len(data.ClusterIDs) != 0 {
			schedatas = append(schedatas, data)
		}
	}
	if len(schedatas) != 0 {
		err := l.updateStorageType(&schedatas)
		if err != nil {
			return nil, err
		}
	}
	return schedatas, nil
}
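// updateStorageType resolves each target cluster to its storage type via
// storeLink.StorageTypeMap and stores the deduplicated, comma-separated list on the
// schedule entry.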
func (l *ScheduleCreateTaskLogic) updateStorageType(schedatas *[]*types.ScheduleData) error {
	for _, s := range *schedatas {
		var storageType string
		var sTypes []string
		for _, id := range s.ClusterIDs {
			cluster, err := l.svcCtx.Scheduler.AiStorages.GetClustersById(id)
			if err != nil {
				return err
			}
			stype, ok := storeLink.StorageTypeMap[strings.Title(cluster.Name)]
			if ok {
				sTypes = append(sTypes, stype)
			}
		}
		sTypes = common.Unique(sTypes)
		for _, st := range sTypes {
			storageType += st + storeLink.COMMA
		}
		storageType = strings.TrimSuffix(storageType, storeLink.COMMA)
		s.StorageType = storageType
	}
	return nil
}
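For context, the file above is plain logic code; the HTTP wiring is not part of it. Below is a minimal, hypothetical sketch of how a go-zero REST handler might invoke this logic. The handler name, its placement in the schedule package, and the use of httpx helpers are illustrative assumptions, not code from this repository.

// Hypothetical handler sketch (not part of schedulecreatetasklogic.go): parses a
// types.CreateTaskReq from the request and delegates to ScheduleCreateTaskLogic.
package schedule

import (
	"net/http"

	"github.com/zeromicro/go-zero/rest/httpx"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/svc"
	"gitlink.org.cn/JointCloud/pcm-coordinator/internal/types"
)

func ScheduleCreateTaskHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var req types.CreateTaskReq
		if err := httpx.Parse(r, &req); err != nil {
			httpx.Error(w, err)
			return
		}
		l := NewScheduleCreateTaskLogic(r.Context(), svcCtx)
		resp, err := l.ScheduleCreateTask(&req)
		if err != nil {
			httpx.Error(w, err)
			return
		}
		httpx.OkJson(w, resp)
	}
}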

PCM is positioned as a software stack over the cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.