
imageinferencelogic.go 9.5 kB

package inference

import (
	"context"
	"errors"
	"math/rand"
	"mime/multipart"
	"net/http"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/go-resty/resty/v2"
	"github.com/zeromicro/go-zero/core/logx"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/scheduler/schedulers/option"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/scheduler/service/collector"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/scheduler/strategy"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/constants"
	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/models"
)
type ImageInferenceLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

func NewImageInferenceLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ImageInferenceLogic {
	return &ImageInferenceLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// ImageInference is a placeholder; the multipart-aware implementation is
// ImageInfer below, which also receives the raw *http.Request.
func (l *ImageInferenceLogic) ImageInference(req *types.ImageInferenceReq) (resp *types.ImageInferenceResp, err error) {
	return nil, nil
}
// ImageInfer reads the uploaded images from the multipart request, schedules
// them across clusters according to the chosen strategy and returns the
// per-image inference results.
func (l *ImageInferenceLogic) ImageInfer(r *http.Request, req *types.ImageInferenceReq) (resp *types.ImageInferenceResp, err error) {
	resp = &types.ImageInferenceResp{}
	opt := &option.InferOption{
		TaskName:        req.TaskName,
		TaskDesc:        req.TaskDesc,
		AdapterId:       req.AdapterId,
		AiClusterIds:    req.AiClusterIds,
		ModelName:       req.ModelName,
		ModelType:       req.ModelType,
		Strategy:        req.Strategy,
		StaticWeightMap: req.StaticWeightMap,
	}

	var ts []struct {
		imageResult *types.ImageResult
		file        multipart.File
	}

	uploadedFiles := r.MultipartForm.File
	if len(uploadedFiles) == 0 || len(uploadedFiles["images"]) == 0 {
		return nil, errors.New("no images were uploaded")
	}
	for _, header := range uploadedFiles["images"] {
		file, err := header.Open()
		if err != nil {
			return nil, err
		}
		defer file.Close()

		var ir types.ImageResult
		ir.ImageName = header.Filename
		ts = append(ts, struct {
			imageResult *types.ImageResult
			file        multipart.File
		}{
			imageResult: &ir,
			file:        file,
		})
	}

	if _, ok := l.svcCtx.Scheduler.AiService.AiCollectorAdapterMap[opt.AdapterId]; !ok {
		return nil, errors.New("adapterId does not exist")
	}

	// Pick the scheduling strategy; only static weighting is supported here.
	var strat strategy.Strategy
	switch opt.Strategy {
	case strategy.STATIC_WEIGHT:
		strat = strategy.NewStaticWeightStrategy(opt.StaticWeightMap, int32(len(ts)))
	default:
		return nil, errors.New("no strategy has been chosen")
	}

	clusters, err := strat.Schedule()
	if err != nil {
		return nil, err
	}

	results, err := infer(opt, clusters, ts, l.svcCtx, l.ctx)
	if err != nil {
		return nil, err
	}
	resp.InferResults = results
	return resp, nil
}
// infer fans the uploaded images out to the scheduled clusters, persists the
// task records and aggregates the per-image results.
func infer(opt *option.InferOption, clusters []*strategy.AssignedCluster, ts []struct {
	imageResult *types.ImageResult
	file        multipart.File
}, svcCtx *svc.ServiceContext, ctx context.Context) ([]*types.ImageResult, error) {
	if len(clusters) == 0 {
		return nil, errors.New("clusters is empty")
	}
	// Drop clusters that were assigned no replicas.
	for i := len(clusters) - 1; i >= 0; i-- {
		if clusters[i].Replicas == 0 {
			clusters = append(clusters[:i], clusters[i+1:]...)
		}
	}

	var wg sync.WaitGroup
	cluster_ch := make(chan struct {
		urls        []*collector.ImageInferUrl
		clusterId   string
		clusterName string
		imageNum    int32
	}, len(clusters))
	var cs []struct {
		urls        []*collector.ImageInferUrl
		clusterId   string
		clusterName string
		imageNum    int32
	}
	collectorMap := svcCtx.Scheduler.AiService.AiCollectorAdapterMap[opt.AdapterId]

	// Save the task.
	var synergystatus int64
	if len(clusters) > 1 {
		synergystatus = 1
	}
	strategyCode, err := svcCtx.Scheduler.AiStorages.GetStrategyCode(opt.Strategy)
	if err != nil {
		return nil, err
	}
	adapterName, err := svcCtx.Scheduler.AiStorages.GetAdapterNameById(opt.AdapterId)
	if err != nil {
		return nil, err
	}
	id, err := svcCtx.Scheduler.AiStorages.SaveTask(opt.TaskName, strategyCode, synergystatus, "11")
	if err != nil {
		return nil, err
	}
	svcCtx.Scheduler.AiStorages.AddNoticeInfo(opt.AdapterId, adapterName, "", "", opt.TaskName, "create", "task is being created")

	// Save one AI sub-task per assigned cluster.
	for _, c := range clusters {
		clusterName, _ := svcCtx.Scheduler.AiStorages.GetClusterNameById(c.ClusterId)
		opt.Replica = c.Replicas
		err := svcCtx.Scheduler.AiStorages.SaveAiTask(id, opt, adapterName, c.ClusterId, clusterName, "", constants.Saved, "")
		if err != nil {
			return nil, err
		}
	}

	// Resolve each cluster's inference URLs concurrently; clusters that fail
	// to return a URL are skipped here and marked as failed below.
	for _, cluster := range clusters {
		wg.Add(1)
		c := cluster
		go func() {
			defer wg.Done()
			imageUrls, err := collectorMap[c.ClusterId].GetImageInferUrl(ctx, opt)
			if err != nil {
				return
			}
			clusterName, _ := svcCtx.Scheduler.AiStorages.GetClusterNameById(c.ClusterId)
			cluster_ch <- struct {
				urls        []*collector.ImageInferUrl
				clusterId   string
				clusterName string
				imageNum    int32
			}{
				urls:        imageUrls,
				clusterId:   c.ClusterId,
				clusterName: clusterName,
				imageNum:    c.Replicas,
			}
		}()
	}
	wg.Wait()
	close(cluster_ch)
	for s := range cluster_ch {
		cs = append(cs, s)
	}

	var aiTaskList []*models.TaskAi
	tx := svcCtx.DbEngin.Raw("select * from task_ai where `task_id` = ? ", id).Scan(&aiTaskList)
	if tx.Error != nil {
		return nil, tx.Error
	}

	// Mark clusters that failed to provide an inference URL as failed.
	if len(clusters) != len(cs) {
		var acs []*strategy.AssignedCluster
		for _, cluster := range clusters {
			if contains(cs, cluster.ClusterId) {
				continue
			}
			acs = append(acs, cluster)
		}
		for _, ac := range acs {
			for _, t := range aiTaskList {
				if ac.ClusterId == strconv.Itoa(int(t.ClusterId)) {
					t.Status = constants.Failed
					if err := svcCtx.Scheduler.AiStorages.UpdateAiTask(t); err != nil {
						logx.Errorf(err.Error())
					}
				}
			}
		}
	}

	// Split the images across the reachable clusters according to the number
	// of replicas each one was assigned, and send the requests concurrently.
	result_ch := make(chan *types.ImageResult, len(ts))
	var results []*types.ImageResult
	var imageNumIdx int32 = 0
	var imageNumIdxEnd int32 = 0
	for _, c := range cs {
		new_images := make([]struct {
			imageResult *types.ImageResult
			file        multipart.File
		}, len(ts))
		copy(new_images, ts)
		imageNumIdxEnd = imageNumIdxEnd + c.imageNum
		new_images = new_images[imageNumIdx:imageNumIdxEnd]
		imageNumIdx = imageNumIdx + c.imageNum
		wg.Add(len(new_images))
		go sendInferReq(new_images, c, &wg, result_ch)
	}
	wg.Wait()
	close(result_ch)
	for s := range result_ch {
		results = append(results, s)
	}
	sort.Slice(results, func(p, q int) bool {
		return results[p].ClusterName < results[q].ClusterName
	})

	// Mark clusters that returned results as completed.
	for _, c := range cs {
		for _, t := range aiTaskList {
			if c.clusterId == strconv.Itoa(int(t.ClusterId)) {
				t.Status = constants.Completed
				if err := svcCtx.Scheduler.AiStorages.UpdateAiTask(t); err != nil {
					logx.Errorf(err.Error())
				}
			}
		}
	}
	return results, nil
}
// sendInferReq posts each image to one of the cluster's inference URLs and
// writes the per-image result (or the error message) onto ch.
func sendInferReq(images []struct {
	imageResult *types.ImageResult
	file        multipart.File
}, cluster struct {
	urls        []*collector.ImageInferUrl
	clusterId   string
	clusterName string
	imageNum    int32
}, wg *sync.WaitGroup, ch chan<- *types.ImageResult) {
	for _, image := range images {
		go func(t struct {
			imageResult *types.ImageResult
			file        multipart.File
		}, c struct {
			urls        []*collector.ImageInferUrl
			clusterId   string
			clusterName string
			imageNum    int32
		}) {
			defer wg.Done()
			// Pick a card at random when the cluster exposes several URLs.
			idx := 0
			if len(c.urls) > 1 {
				idx = rand.Intn(len(c.urls))
			}
			t.imageResult.ClusterName = c.clusterName
			t.imageResult.Card = c.urls[idx].Card
			r, err := getInferResult(c.urls[idx].Url, t.file, t.imageResult.ImageName)
			if err != nil {
				t.imageResult.ImageResult = err.Error()
				ch <- t.imageResult
				return
			}
			t.imageResult.ImageResult = r
			ch <- t.imageResult
		}(image, cluster)
	}
}
// getInferResult uploads a single image file to url and returns the textual
// inference result.
func getInferResult(url string, file multipart.File, fileName string) (string, error) {
	var res Res
	req := GetRestyRequest(10)
	_, err := req.
		SetFileReader("file", fileName, file).
		SetResult(&res).
		Post(url)
	if err != nil {
		return "", err
	}
	return res.Result, nil
}

// GetRestyRequest returns a resty request with the given timeout in seconds.
func GetRestyRequest(timeoutSeconds int64) *resty.Request {
	client := resty.New().SetTimeout(time.Duration(timeoutSeconds) * time.Second)
	return client.R()
}

type Res struct {
	Result string `json:"result"`
}
// contains reports whether a cluster with the given id is present in cs.
func contains(cs []struct {
	urls        []*collector.ImageInferUrl
	clusterId   string
	clusterName string
	imageNum    int32
}, e string) bool {
	for _, c := range cs {
		if c.clusterId == e {
			return true
		}
	}
	return false
}
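For reference, below is a minimal client-side sketch of how this handler could be exercised. Only the multipart field name "images" is taken from the code above; the host, route, and the extra form field names are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/go-resty/resty/v2"
)

func main() {
	client := resty.New()
	// The handler reads its uploads from MultipartForm.File["images"], so each
	// image is attached under the "images" field. The URL and the remaining
	// form fields below are placeholders, not taken from the repository.
	resp, err := client.R().
		SetFile("images", "cat.jpg").
		SetFile("images", "dog.jpg").
		SetFormData(map[string]string{
			"taskName":  "demo-task",
			"adapterId": "1",
			"strategy":  "staticWeight",
		}).
		Post("http://localhost:8888/inference/image")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.String())
}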

PCM is positioned as a software stack over the cloud, aiming to build the standards and ecosystem of heterogeneous cloud collaboration for JCC in a non-intrusive, autonomous, peer-to-peer manner.