@@ -0,0 +1,83 @@
package monitoring

import (
	"context"
	"strings"

	"github.com/zeromicro/go-zero/core/logx"

	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
)

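// ScheduleSituationLogic backs the schedule-situation monitoring endpoint, which
// reports per-cluster task counts, cross-cluster scheduling links and region categories.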
type ScheduleSituationLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

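// NewScheduleSituationLogic creates a ScheduleSituationLogic bound to the request
// context and the shared service context.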
func NewScheduleSituationLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ScheduleSituationLogic {
	return &ScheduleSituationLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}

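// ScheduleSituation builds the scheduling overview: node statistics per cluster,
// links between clusters that share multi-cluster tasks, and the region category names.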
func (l *ScheduleSituationLogic) ScheduleSituation() (resp *types.ScheduleSituationResp, err error) {
	resp = &types.ScheduleSituationResp{}

	// Nodes: number of non-deleted AI, cloud, HPC and VM tasks per cluster, with the
	// cluster's region dict item as the category. The deleted_at filters sit in the
	// join conditions so clusters without tasks are still returned by the LEFT JOINs.
	tx := l.svcCtx.DbEngin.Raw("SELECT c.id, c.name, tdi.id AS category, COUNT(DISTINCT ta.id)+COUNT(DISTINCT tc.id)+COUNT(DISTINCT th.id)+COUNT(DISTINCT tv.id) AS value FROM t_cluster c LEFT JOIN t_dict_item tdi ON c.region_dict = tdi.id LEFT JOIN task_ai ta ON ta.cluster_id = c.id AND ta.deleted_at IS NULL LEFT JOIN task_cloud tc ON tc.cluster_id = c.id AND tc.deleted_at IS NULL LEFT JOIN task_hpc th ON th.cluster_id = c.id AND th.deleted_at IS NULL LEFT JOIN task_vm tv ON tv.cluster_id = c.id AND tv.deleted_at IS NULL GROUP BY c.id").Scan(&resp.Nodes)
	if tx.Error != nil {
		return nil, tx.Error
	}

	// HPC tasks scheduled across more than one cluster (comma-separated cluster ids per task).
	var hpcLinks []string
	tx = l.svcCtx.DbEngin.Raw("SELECT GROUP_CONCAT(cluster_id SEPARATOR ',') AS cluster_ids FROM task_hpc WHERE deleted_at IS NULL GROUP BY task_id HAVING COUNT(*) > 1").Scan(&hpcLinks)
	if tx.Error != nil {
		return nil, tx.Error
	}
	LinksHandler(hpcLinks, resp)

	// Cloud tasks scheduled across more than one cluster.
	var cloudLinks []string
	tx = l.svcCtx.DbEngin.Raw("SELECT GROUP_CONCAT(cluster_id SEPARATOR ',') AS cluster_ids FROM task_cloud WHERE deleted_at IS NULL GROUP BY task_id HAVING COUNT(*) > 1").Scan(&cloudLinks)
	if tx.Error != nil {
		return nil, tx.Error
	}
	LinksHandler(cloudLinks, resp)

	// AI tasks scheduled across more than one cluster.
	var aiLinks []string
	tx = l.svcCtx.DbEngin.Raw("SELECT GROUP_CONCAT(cluster_id SEPARATOR ',') AS cluster_ids FROM task_ai WHERE deleted_at IS NULL GROUP BY task_id HAVING COUNT(*) > 1").Scan(&aiLinks)
	if tx.Error != nil {
		return nil, tx.Error
	}
	LinksHandler(aiLinks, resp)

	// VM tasks scheduled across more than one cluster.
	var vmLinks []string
	tx = l.svcCtx.DbEngin.Raw("SELECT GROUP_CONCAT(cluster_id SEPARATOR ',') AS cluster_ids FROM task_vm WHERE deleted_at IS NULL GROUP BY task_id HAVING COUNT(*) > 1").Scan(&vmLinks)
	if tx.Error != nil {
		return nil, tx.Error
	}
	LinksHandler(vmLinks, resp)

	// Categories: region names from the cluster_region_dict dictionary.
	tx = l.svcCtx.DbEngin.Raw("SELECT tdi.item_text AS name FROM t_dict_item tdi, t_dict td WHERE td.dict_code = 'cluster_region_dict' AND tdi.dict_id = td.id").Scan(&resp.Categories)
	if tx.Error != nil {
		return nil, tx.Error
	}

	return resp, nil
}

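// LinksHandler turns each comma-separated list of cluster ids into links between
// consecutive, distinct cluster ids and appends them to resp.Links.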
func LinksHandler(sources []string, resp *types.ScheduleSituationResp) {
	for _, source := range sources {
		links := strings.Split(source, ",")
		for i := 1; i < len(links); i++ {
			if links[i] != links[i-1] {
				resp.Links = append(resp.Links, types.Link{Source: links[i], Target: links[i-1]})
			}
		}
	}
}