From b7e230aa59b62f91d2a92f9db3d1782618cd4896 Mon Sep 17 00:00:00 2001
From: tzwang
Date: Wed, 8 Jan 2025 17:45:00 +0800
Subject: [PATCH] updated shuguangai resourceSpec

---
 internal/scheduler/schedulers/aiScheduler.go |  2 +-
 internal/storeLink/openi.go                  |  8 ++--
 internal/storeLink/shuguangai.go             | 42 +++++++++++++-------
 3 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/internal/scheduler/schedulers/aiScheduler.go b/internal/scheduler/schedulers/aiScheduler.go
index 60954758..6ef1dcf5 100644
--- a/internal/scheduler/schedulers/aiScheduler.go
+++ b/internal/scheduler/schedulers/aiScheduler.go
@@ -357,7 +357,7 @@ func (as *AiScheduler) findClustersWithResources() ([]*collector.ResourceStats,
 			})
 			msg += fmt.Sprintf("clusterId: %v , error: %v \n", e.clusterId, e.err.Error())
 		}
-		return nil, errors.New(msg)
+		//return nil, errors.New(msg)
 	}
 
 	return resourceSpecs, nil
diff --git a/internal/storeLink/openi.go b/internal/storeLink/openi.go
index e20b0484..4b150ff1 100644
--- a/internal/storeLink/openi.go
+++ b/internal/storeLink/openi.go
@@ -610,14 +610,14 @@ func (o OpenI) GetResourceSpecs(ctx context.Context) (*collector.ResourceSpec, e
 		mem := &collector.Usage{
 			Type:      strings.ToUpper(MEMORY),
 			Name:      strings.ToUpper(RAM),
-			Total:     &collector.UnitValue{Unit: CPUCORE, Value: v.MemGiB},
-			Available: &collector.UnitValue{Unit: CPUCORE, Value: v.MemGiB},
+			Total:     &collector.UnitValue{Unit: GIGABYTE, Value: v.MemGiB},
+			Available: &collector.UnitValue{Unit: GIGABYTE, Value: v.MemGiB},
 		}
 		vmem := &collector.Usage{
 			Type:      strings.ToUpper(MEMORY),
 			Name:      strings.ToUpper(VRAM),
-			Total:     &collector.UnitValue{Unit: CPUCORE, Value: v.GpuMemGiB},
-			Available: &collector.UnitValue{Unit: CPUCORE, Value: v.GpuMemGiB},
+			Total:     &collector.UnitValue{Unit: GIGABYTE, Value: v.GpuMemGiB},
+			Available: &collector.UnitValue{Unit: GIGABYTE, Value: v.GpuMemGiB},
 		}
 		bres = append(bres, cpu)
 		bres = append(bres, mem)
diff --git a/internal/storeLink/shuguangai.go b/internal/storeLink/shuguangai.go
index 473847e3..0b237a2d 100644
--- a/internal/storeLink/shuguangai.go
+++ b/internal/storeLink/shuguangai.go
@@ -1086,7 +1086,7 @@ func (s *ShuguangAi) GetResourceSpecs(ctx context.Context) (*collector.ResourceS
 	uwg.Add(4)
 	var ch = make(chan *collector.Usage, 2)
 	var qCh = make(chan *collector.Usage, 2)
-	var uCh = make(chan *collector.Usage)
+	var cresCh = make(chan *collector.ClusterResource)
 
 	resUsage := &collector.ResourceSpec{
 		ClusterId: strconv.FormatInt(s.participantId, 10),
@@ -1125,7 +1125,7 @@ func (s *ShuguangAi) GetResourceSpecs(ctx context.Context) (*collector.ResourceS
 			Total: &collector.UnitValue{Unit: PERHOUR, Value: queChargeRate},
 		}
 
-		uCh <- rate
+		cresCh <- &collector.ClusterResource{Resource: rate}
 	}()
 
 	var freeNodes int64
@@ -1174,7 +1174,7 @@ func (s *ShuguangAi) GetResourceSpecs(ctx context.Context) (*collector.ResourceS
 				Value: v,
 			}
 
-			uCh <- run
+			cresCh <- &collector.ClusterResource{Resource: run}
 		}()
 
 		return
@@ -1188,7 +1188,7 @@ func (s *ShuguangAi) GetResourceSpecs(ctx context.Context) (*collector.ResourceS
 			Value: v,
 		}
 
-		uCh <- run
+		cresCh <- &collector.ClusterResource{Resource: run}
 	}()
 
 }
@@ -1253,22 +1253,22 @@ func (s *ShuguangAi) GetResourceSpecs(ctx context.Context) (*collector.ResourceS
 			return
 		}
 
-		totalStorage := common.RoundFloat(diskResp.Data[0].Threshold*KB*KB*KB, 3)
-		availStorage := common.RoundFloat((diskResp.Data[0].Threshold-diskResp.Data[0].Usage)*KB*KB*KB, 3)
+		totalStorage := common.RoundFloat(diskResp.Data[0].Threshold, 0)
+		availStorage := common.RoundFloat((diskResp.Data[0].Threshold - diskResp.Data[0].Usage), 0)
 
 		storage := &collector.Usage{}
 		storage.Type = STORAGE
 		storage.Name = DISK
 		storage.Total = &collector.UnitValue{
-			Unit:  KILOBYTE,
+			Unit:  GIGABYTE,
 			Value: totalStorage,
 		}
 		storage.Available = &collector.UnitValue{
-			Unit:  KILOBYTE,
+			Unit:  GIGABYTE,
 			Value: availStorage,
 		}
 
-		uCh <- storage
+		cresCh <- &collector.ClusterResource{Resource: storage}
 
 	}()
 
@@ -1289,32 +1289,46 @@ func (s *ShuguangAi) GetResourceSpecs(ctx context.Context) (*collector.ResourceS
 			Value: balance,
 		}
 
-		uCh <- bal
+		cresCh <- &collector.ClusterResource{Resource: bal}
 	}()
 
 	go func() {
 		uwg.Wait()
-		close(uCh)
+		close(cresCh)
 	}()
 
-	for v := range uCh {
+	for v := range cresCh {
 		resources = append(resources, v)
 	}
 
 	wg.Wait()
 
+	cres := &collector.ClusterResource{}
+	bres := make([]*collector.Usage, 0)
 	if len(qCh) == 0 {
 		for v := range ch {
 			v.Available = v.Total
-			resources = append(resources, v)
+			switch v.Type {
+			case DCU:
+				cres.Resource = v
+			case CPU:
+				bres = append(bres, v)
+			}
 		}
 	} else {
 		for v := range qCh {
-			resources = append(resources, v)
+			switch v.Type {
+			case DCU:
+				cres.Resource = v
+			case CPU:
+				bres = append(bres, v)
+			}
 		}
 	}
 
+	cres.BaseResources = bres
+	resources = append(resources, cres)
 
 	resUsage.Resources = resources
 
 	return resUsage, nil
 }
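
Note on the shuguangai.go change above: GetResourceSpecs used to push bare
*collector.Usage values onto uCh; the patch renames the channel to cresCh,
wraps every sample in a collector.ClusterResource, and then folds the
usages drained from ch/qCh into a single ClusterResource whose Resource is
the DCU entry and whose BaseResources carry the CPU entry, mirroring the
bres handling already present in openi.go. The sketch below is a minimal,
self-contained illustration of that grouping; the type definitions and the
DCU/CPU constants are stand-ins inferred from the diff, not copies of the
project's collector or storeLink packages.

package main

import "fmt"

// Stand-ins for the collector types used by the patch; the real
// definitions live in the project's collector package and may differ.
type UnitValue struct {
	Unit  string
	Value float64
}

type Usage struct {
	Type      string
	Name      string
	Total     *UnitValue
	Available *UnitValue
}

type ClusterResource struct {
	Resource      *Usage   // the accountable resource (the DCU entry here)
	BaseResources []*Usage // supporting resources such as CPU
}

// Assumed values for the storeLink constants referenced in the diff.
const (
	DCU = "DCU"
	CPU = "CPU"
)

// groupUsages mirrors the patched selection loop in GetResourceSpecs:
// the DCU usage becomes ClusterResource.Resource, CPU usages are
// appended to BaseResources, and any other type is dropped.
func groupUsages(usages []*Usage) *ClusterResource {
	cres := &ClusterResource{BaseResources: make([]*Usage, 0)}
	for _, v := range usages {
		switch v.Type {
		case DCU:
			cres.Resource = v
		case CPU:
			cres.BaseResources = append(cres.BaseResources, v)
		}
	}
	return cres
}

func main() {
	dcu := &Usage{Type: DCU, Name: "DCU", Total: &UnitValue{Unit: "CARD", Value: 4}}
	cpu := &Usage{Type: CPU, Name: "CPU", Total: &UnitValue{Unit: "CPUCORE", Value: 128}}
	cres := groupUsages([]*Usage{dcu, cpu})
	fmt.Println(cres.Resource.Name, len(cres.BaseResources)) // prints: DCU 1
}

Running the sketch prints "DCU 1": the DCU usage ends up as the primary
resource with one CPU base resource attached, which is the shape the
patched loop appends to resUsage.Resources.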