
commithpctasklogic.go 5.1 kB

package hpc

import (
    "context"
    "errors"
    "strconv"
    "time"

    "github.com/go-resty/resty/v2"
    "github.com/zeromicro/go-zero/core/logx"
    clientCore "gitlink.org.cn/JointCloud/pcm-coordinator/client"
    "gitlink.org.cn/JointCloud/pcm-coordinator/internal/svc"
    "gitlink.org.cn/JointCloud/pcm-coordinator/internal/types"
    "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/models"
    "gitlink.org.cn/JointCloud/pcm-coordinator/pkg/utils/remoteUtil"
    v1 "gitlink.org.cn/JointCloud/pcm-hpc/routers/v1"
    "k8s.io/apimachinery/pkg/util/json"
)

type CommitHpcTaskLogic struct {
    logx.Logger
    ctx    context.Context
    svcCtx *svc.ServiceContext
}

func NewCommitHpcTaskLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CommitHpcTaskLogic {
    return &CommitHpcTaskLogic{
        Logger: logx.WithContext(ctx),
        ctx:    ctx,
        svcCtx: svcCtx,
    }
}

func (l *CommitHpcTaskLogic) CommitHpcTask(req *types.CommitHpcTaskReq) (resp *types.CommitHpcTaskResp, err error) {
    // Look up the target cluster; fail early if it does not exist.
    var clusterInfo types.ClusterInfo
    l.svcCtx.DbEngin.Raw("SELECT * FROM `t_cluster` where id = ?", req.ClusterId).First(&clusterInfo)
    if len(clusterInfo.Id) == 0 {
        return nil, errors.New("cluster not found")
    }

    // Build the main task record.
    taskModel := models.Task{
        Name:            req.Name,
        Description:     req.Description,
        CommitTime:      time.Now(),
        Status:          "Running",
        AdapterTypeDict: "2",
        UserId:          req.UserId,
    }

    // Persist the task to the database.
    tx := l.svcCtx.DbEngin.Create(&taskModel)
    if tx.Error != nil {
        return nil, tx.Error
    }

    // Resolve the adapter that fronts this cluster.
    var adapterName string
    l.svcCtx.DbEngin.Raw("SELECT name FROM `t_adapter` where id = ?", clusterInfo.AdapterId).Scan(&adapterName)
    var server string
    l.svcCtx.DbEngin.Raw("SELECT server FROM `t_adapter` where id = ?", clusterInfo.AdapterId).Scan(&server)
    if adapterName == "" {
        return nil, errors.New("no corresponding adapter found")
    }

    clusterId, err := strconv.ParseInt(req.ClusterId, 10, 64)
    if err != nil {
        return nil, err
    }

    // Build the HPC-specific task record; the working directory is the
    // cluster's base directory joined with the request's relative path.
    hpcInfo := models.TaskHpc{
        TaskId:      taskModel.Id,
        AdapterId:   clusterInfo.AdapterId,
        AdapterName: adapterName,
        ClusterId:   clusterId,
        ClusterName: clusterInfo.Name,
        Name:        taskModel.Name,
        CmdScript:   req.CmdScript,
        StartTime:   time.Now().String(),
        CardCount:   req.CardCount,
        WorkDir:     clusterInfo.WorkDir + req.WorkDir,
        WallTime:    req.WallTime,
        AppType:     req.AppType,
        AppName:     req.AppName,
        Queue:       req.Queue,
        SubmitType:  req.SubmitType,
        NNode:       req.NNode,
        Account:     clusterInfo.Username,
        StdInput:    req.StdInput,
        Partition:   req.Partition,
        CreatedTime: time.Now(),
        UpdatedTime: time.Now(),
        Status:      "Running",
    }
    tx = l.svcCtx.DbEngin.Create(&hpcInfo)
    if tx.Error != nil {
        return nil, tx.Error
    }

    // Save the operation record; a failure here is logged but not fatal.
    noticeInfo := clientCore.NoticeInfo{
        AdapterId:   clusterInfo.AdapterId,
        AdapterName: adapterName,
        ClusterId:   clusterId,
        ClusterName: clusterInfo.Name,
        NoticeType:  "create",
        TaskName:    req.Name,
        Incident:    "task creating",
        CreatedTime: time.Now(),
    }
    result := l.svcCtx.DbEngin.Table("t_notice").Create(&noticeInfo)
    if result.Error != nil {
        logx.Errorf("Task creation failure, err: %v", result.Error)
    }

    resp = &types.CommitHpcTaskResp{
        JobId: "",
    }

    // Record the task on the blockchain as evidence.
    bytes, err := json.Marshal(taskModel)
    if err != nil {
        return nil, err
    }
    remoteUtil.Evidence(remoteUtil.EvidenceParam{
        UserIp:          req.UserIp,
        Url:             l.svcCtx.Config.BlockChain.Url,
        ContractAddress: l.svcCtx.Config.BlockChain.ContractAddress,
        FunctionName:    l.svcCtx.Config.BlockChain.FunctionName,
        Type:            l.svcCtx.Config.BlockChain.Type,
        Token:           req.Token,
        Args:            []string{strconv.FormatInt(taskModel.Id, 10), string(bytes)},
    })

    // Submit the job to the target cluster asynchronously; the response does
    // not wait for the remote submission, so JobId is returned empty.
    logx.Info("submitting job to the target cluster")
    go func() {
        if _, err := submitJob(&hpcInfo, &clusterInfo, server); err != nil {
            logx.Errorf("job submission failed, err: %v", err)
        }
    }()
    return resp, nil
}

func submitJob(hpcInfo *models.TaskHpc, clusterInfo *types.ClusterInfo, adapterAddress string) (int, error) {
    submitJobReq := v1.SubmitJobReq{
        Server:   clusterInfo.Server,
        Version:  clusterInfo.Version,
        Username: clusterInfo.Username,
        Token:    clusterInfo.Token,
        JobOptions: v1.JobOptions{
            Script: hpcInfo.CmdScript,
            Job: &v1.JobProperties{
                Account:                 hpcInfo.Account,
                Name:                    hpcInfo.Name,
                NTasks:                  1,
                CurrentWorkingDirectory: hpcInfo.WorkDir,
                Partition:               hpcInfo.Partition,
                Environment: map[string]string{
                    "PATH":            clusterInfo.EnvPath,
                    "LD_LIBRARY_PATH": clusterInfo.EnvLdPath,
                },
                StandardOutput: hpcInfo.WorkDir + "/job.out",
                StandardError:  hpcInfo.WorkDir + "/job.err",
            },
        },
    }
    var resp v1.SubmitJobResp
    httpClient := resty.New().R()
    logx.Info("remote call to the participant-side API started")
    _, err := httpClient.SetHeader("Content-Type", "application/json").
        SetBody(submitJobReq).
        SetResult(&resp).
        Post(adapterAddress + "/api/v1/job/submit")
    logx.Info("remote call to the participant-side API finished")
    if err != nil {
        return 0, err
    }
    return resp.JobId, nil
}
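For context, go-zero services call a logic type like this from a thin generated HTTP handler. The sketch below shows what the corresponding goctl-style handler typically looks like; the handler file is not part of this source, so the function name CommitHpcTaskHandler, the file path comment, and the logic import path are assumptions based on go-zero conventions.

// internal/handler/hpc/commithpctaskhandler.go (assumed path, goctl convention)
package hpc

import (
    "net/http"

    "github.com/zeromicro/go-zero/rest/httpx"
    "gitlink.org.cn/JointCloud/pcm-coordinator/internal/logic/hpc"
    "gitlink.org.cn/JointCloud/pcm-coordinator/internal/svc"
    "gitlink.org.cn/JointCloud/pcm-coordinator/internal/types"
)

func CommitHpcTaskHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        // Decode path, query, and body parameters into the request struct.
        var req types.CommitHpcTaskReq
        if err := httpx.Parse(r, &req); err != nil {
            httpx.ErrorCtx(r.Context(), w, err)
            return
        }
        // Delegate to the logic layer and serialize its result as JSON.
        l := hpc.NewCommitHpcTaskLogic(r.Context(), svcCtx)
        resp, err := l.CommitHpcTask(&req)
        if err != nil {
            httpx.ErrorCtx(r.Context(), w, err)
        } else {
            httpx.OkJsonCtx(r.Context(), w, resp)
        }
    }
}

Because CommitHpcTask submits the job in a goroutine, a successful HTTP response only confirms that the task was recorded, not that the cluster accepted the job.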

PCM is positioned as a software stack over cloud, aiming to build the standards and ecology of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.