| @@ -1,107 +0,0 @@ | |||
| version: 2 | |||
| name: rpc | |||
| description: "" | |||
| global: | |||
| concurrent: 1 | |||
| param: | |||
| - ref: deploy_path | |||
| name: deploy_path | |||
| value: '"pcm-coordinator-rpc.yaml"' | |||
| required: false | |||
| type: STRING | |||
| hidden: false | |||
| - ref: nacos_host | |||
| name: nacos_host | |||
| value: '"10.206.0.12"' | |||
| required: false | |||
| type: STRING | |||
| hidden: false | |||
| - ref: secret_name | |||
| name: "" | |||
| value: '"jcce-aliyuncs"' | |||
| required: false | |||
| type: STRING | |||
| hidden: false | |||
| - ref: project_name | |||
| name: "" | |||
| value: '"pcm-coordinator-rpc"' | |||
| required: false | |||
| type: STRING | |||
| hidden: false | |||
| trigger: | |||
| webhook: gitlink@1.0.0 | |||
| event: | |||
| - ref: create_tag | |||
| ruleset: | |||
| - param-ref: tag | |||
| operator: EQ | |||
| value: '""' | |||
| ruleset-operator: AND | |||
| workflow: | |||
| - ref: start | |||
| name: Start | |||
| task: start | |||
| - ref: git_clone_0 | |||
| name: git clone | |||
| task: git_clone@1.2.6 | |||
| input: | |||
| remote_url: '"https://gitlink.org.cn/jcce-pcm/pcm-coordinator.git"' | |||
| ref: '"refs/heads/master"' | |||
| commit_id: '""' | |||
| depth: 1 | |||
| needs: | |||
| - start | |||
| - ref: docker_image_build_0 | |||
| name: docker image build | |||
| task: docker_image_build@1.6.0 | |||
| input: | |||
| docker_username: ((dev.docker_user)) | |||
| docker_password: ((dev.docker_password)) | |||
| image_name: '"registry.cn-hangzhou.aliyuncs.com/jcce/pcm-coordinator-rpc"' | |||
| image_tag: git_clone_0.commit_time | |||
| registry_address: '"registry.cn-hangzhou.aliyuncs.com"' | |||
| docker_file: git_clone_0.git_path + '/rpc/Dockerfile' | |||
| docker_build_path: git_clone_0.git_path | |||
| workspace: git_clone_0.git_path | |||
| image_clean: true | |||
| image_push: true | |||
| build_args: '""' | |||
| needs: | |||
| - shell_0 | |||
| - ref: end | |||
| name: End | |||
| task: end | |||
| needs: | |||
| - kubectl_deploy_0 | |||
| - ref: kubectl_deploy_0 | |||
| name: kubectl deploy resources | |||
| task: kubectl_deploy@1.1.0 | |||
| input: | |||
| command: '"apply"' | |||
| resource_file_path: git_clone_0.git_path + '/rpc' | |||
| certificate_authority_data: ((dev.k8s_cad)) | |||
| server: '"https://119.45.100.73:6443"' | |||
| client_certificate_data: ((dev.k8s_ccd)) | |||
| client_key_data: ((dev.k8s_ckd)) | |||
| hosts: '""' | |||
| needs: | |||
| - docker_image_build_0 | |||
| - ref: shell_0 | |||
| name: shell | |||
| image: docker.jianmuhub.com/library/debian:buster-slim | |||
| env: | |||
| IMAGE_NAME: '"registry.cn-hangzhou.aliyuncs.com/jcce/pcm-coordinator-rpc"' | |||
| IMAGE_TAG: git_clone_0.commit_time | |||
| SECRET_NAME: global.secret_name | |||
| NACOS_HOST: global.nacos_host | |||
| PROJECT_NAME: global.project_name | |||
| PROJECT_PATH: git_clone_0.git_path | |||
| script: | |||
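| - # replace the image_name/secret_name/nacos_host placeholders in the deployment manifest with real values | |||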
| - cd ${PROJECT_PATH}/rpc | |||
| - sed -i "s#image_name#${IMAGE_NAME}:${IMAGE_TAG}#" ${PROJECT_NAME}.yaml | |||
| - sed -i "s#secret_name#${SECRET_NAME}#" ${PROJECT_NAME}.yaml | |||
| - sed -i "s#nacos_host#${NACOS_HOST}#" ${PROJECT_NAME}.yaml | |||
| - cat ${PROJECT_NAME}.yaml | |||
| needs: | |||
| - git_clone_0 | |||
| @@ -1,5 +1,5 @@ | |||
| version: 2 | |||
| name: api | |||
| name: xjlab-api | |||
| description: "" | |||
| global: | |||
| concurrent: 1 | |||
| @@ -10,12 +10,6 @@ global: | |||
| required: false | |||
| type: STRING | |||
| hidden: false | |||
| - ref: nacos_host | |||
| name: nacos_host | |||
| value: '"10.206.0.12"' | |||
| required: false | |||
| type: STRING | |||
| hidden: false | |||
| - ref: secret_name | |||
| name: "" | |||
| value: '"jcce-aliyuncs"' | |||
| @@ -45,8 +39,8 @@ workflow: | |||
| name: git clone | |||
| task: git_clone@1.2.6 | |||
| input: | |||
| remote_url: '"https://gitlink.org.cn/jcce-pcm/pcm-coordinator.git"' | |||
| ref: '"refs/heads/master"' | |||
| remote_url: '"https://gitlink.org.cn/JointCloud/pcm-coordinator.git"' | |||
| ref: '"refs/heads/v0.1.0-xjlab-alpha1"' | |||
| commit_id: '""' | |||
| depth: 1 | |||
| needs: | |||
| @@ -93,14 +87,12 @@ workflow: | |||
| IMAGE_NAME: '"registry.cn-hangzhou.aliyuncs.com/jcce/pcm-coordinator-api"' | |||
| IMAGE_TAG: git_clone_0.commit_time | |||
| SECRET_NAME: global.secret_name | |||
| NACOS_HOST: global.nacos_host | |||
| PROJECT_NAME: global.project_name | |||
| PROJECT_PATH: git_clone_0.git_path | |||
| script: | |||
| - cd ${PROJECT_PATH}/api | |||
| - sed -i "s#image_name#${IMAGE_NAME}:${IMAGE_TAG}#" ${PROJECT_NAME}.yaml | |||
| - sed -i "s#secret_name#${SECRET_NAME}#" ${PROJECT_NAME}.yaml | |||
| - sed -i "s#nacos_host#${NACOS_HOST}#" ${PROJECT_NAME}.yaml | |||
| - cat ${PROJECT_NAME}.yaml | |||
| needs: | |||
| - git_clone_0 | |||
| @@ -1,15 +1,22 @@ | |||
| FROM golang:1.20.2-alpine3.17 AS builder | |||
| WORKDIR /app | |||
| FROM golang:1.21.2-alpine3.18 AS builder | |||
| LABEL stage=gobuilder | |||
| ENV CGO_ENABLED 0 | |||
| ENV GOARCH amd64 | |||
| ENV GOPROXY https://goproxy.cn,direct | |||
| RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.sjtug.sjtu.edu.cn/g' /etc/apk/repositories && \ | |||
| apk update --no-cache && apk add --no-cache tzdata | |||
| WORKDIR /app | |||
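| # copy go.mod/go.sum and download modules first so this layer is cached when only source changes | |||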
| ADD go.mod . | |||
| ADD go.sum . | |||
| RUN go mod download | |||
| COPY . . | |||
| COPY api/etc/ /app/ | |||
| RUN go mod download && go build -o pcm-coordinator-api /app/api/pcm.go | |||
| RUN go build -o pcm-coordinator-api /app/api/pcm.go | |||
| FROM alpine:3.16.2 | |||
| @@ -6,6 +6,50 @@ info( | |||
| author: "zhouqj" | |||
| email: "450705171@qq.com" | |||
| ) | |||
| type ( | |||
| resourceCenterInfoReq { | |||
| participantId int64 `path:"participantId"` | |||
| } | |||
| resourceCenterInfoResp { | |||
| tasksName []string `json:"tasksName"` | |||
| runningTasksNum int64 `json:"runningTasksNum"` | |||
| resourceUsage ResourceUsage `json:"resourceUsage"` | |||
| resourceTotal ResourceTotal `json:"resourceTotal"` | |||
| } | |||
| ResourceUsage { | |||
| cpu float64 `json:"cpu"` | |||
| memory float64 `json:"memory"` | |||
| storage float64 `json:"storage"` | |||
| } | |||
| ResourceTotal { | |||
| cpu int64 `json:"cpu"` | |||
| memory int64 `json:"memory"` | |||
| storage int64 `json:"storage"` | |||
| } | |||
| ) | |||
| type ( | |||
| tasksNumReq { | |||
| } | |||
| tasksNumResp { | |||
| totalNum int64 `json:"totalNum"` | |||
| runningNum int64 `json:"runningNum"` | |||
| completedNum int64 `json:"completedNum"` | |||
| } | |||
| ) | |||
| type ( | |||
| submitJobReq { | |||
| @@ -521,16 +565,41 @@ type NodeAsset { | |||
| ParticipantId int64 `json:"ParticipantId"` // cluster dynamic info id | |||
| } | |||
| type crackProgressResp { | |||
| crackProgressList []CrackProgress `json:"crackProgressList"` | |||
| } | |||
| type CrackProgress { | |||
| Name string `json:"name"` | |||
| Progress string `json:"progress"` | |||
| Current string `json:"current"` | |||
| Total string `json:"total"` | |||
| Speed string `json:"speed"` | |||
| } | |||
| type SaveHashcatReq { | |||
| CrackTaskId string `json:"crackTaskId"` // task id | |||
| CrackContainerId string `json:"crackContainerId"` // container id | |||
| CrackStatus string `json:"crackStatus"` // status | |||
| CrackStartTime string `json:"crackStartTime"` // start time | |||
| CrackEstimatedTime string `json:"crackEstimatedTime"` // estimated time | |||
| CrackProgress string `json:"crackProgress"` // progress | |||
| CrackResult string `json:"crackResult"` // result | |||
| Started string `json:"started"` // start time | |||
| Stopped string `json:"stopped"` // stop time | |||
| CrackTaskId string `json:"crackTaskId"` // task id | |||
| CrackContainerId string `json:"crackContainerId"` // container id | |||
| CrackStatus string `json:"crackStatus"` // status | |||
| CrackStartTime string `json:"crackStartTime"` // start time | |||
| CrackEstimatedTime string `json:"crackEstimatedTime"` // estimated time | |||
| CrackProgress string `json:"crackProgress"` // progress | |||
| CrackResult string `json:"crackResult"` // result | |||
| Started string `json:"started,optional"` // start time | |||
| Stopped string `json:"stopped,optional"` // stop time | |||
| KernelFeature string `json:"kernelFeature"` | |||
| HashMode string `json:"hashMode"` | |||
| Rejected string `json:"rejected"` | |||
| Session string `json:"session"` | |||
| HashTarget string `json:"hashTarget"` | |||
| Speed string `json:"speed"` | |||
| Candidates string `json:"candidates"` | |||
| RestorePoint string `json:"restorePoint"` | |||
| Recovered string `json:"recovered"` | |||
| GuessQueue string `json:"guessQueue"` | |||
| CandidateEngine string `json:"candidateEngine"` | |||
| GuessMask string `json:"guessMask"` | |||
| RestoreSub string `json:"restoreSub"` | |||
| } | |||
| type getHashcatHandlerReq { | |||
| @@ -538,10 +607,6 @@ type getHashcatHandlerReq { | |||
| } | |||
| type getHashcatHandlerResp { | |||
| HashCatList []HashCat `json:"hashCatList"` | |||
| } | |||
| type HashCat { | |||
| CrackTaskId string `json:"crackTaskId"` // task id | |||
| CrackContainerId string `json:"crackContainerId"` // container id | |||
| CrackStatus string `json:"crackStatus"` // status | |||
| @@ -551,8 +616,23 @@ type HashCat { | |||
| CrackResult string `json:"crackResult"` // result | |||
| Started string `json:"started"` // start time | |||
| Stopped string `json:"stopped"` // stop time | |||
| KernelFeature string `json:"kernelFeature"` | |||
| HashMode string `json:"hashMode"` | |||
| Rejected string `json:"rejected"` | |||
| Session string `json:"session"` | |||
| HashTarget string `json:"hashTarget"` | |||
| Speed string `json:"speed"` | |||
| Candidates string `json:"candidates"` | |||
| RestorePoint string `json:"restorePoint"` | |||
| Recovered string `json:"recovered"` | |||
| GuessQueue string `json:"guessQueue"` | |||
| CandidateEngine string `json:"candidateEngine"` | |||
| GuessMask string `json:"guessMask"` | |||
| RestoreSub string `json:"restoreSub"` | |||
| } | |||
| type participantListResp { | |||
| Participants []Participant `json:"participants"` | |||
| } | |||
| @@ -23,6 +23,7 @@ info( | |||
| prefix: pcm/v1 | |||
| group : core | |||
| ) | |||
| service pcm { | |||
| @doc "查询P端服务列表" | |||
| @handler participantListHandler | |||
| @@ -42,7 +43,7 @@ service pcm { | |||
| @doc "查询任务列表" | |||
| @handler TaskListHandler | |||
| get /core/taskList (taskListReq)returns (taskListResp) | |||
| get /core/taskList (taskListReq) returns (taskListResp) | |||
| @doc "查询任务详情" | |||
| @handler TaskDetailHandler | |||
| @@ -104,9 +105,25 @@ service pcm { | |||
| @handler saveHashcatHandler | |||
| post /core/saveHashcat (SaveHashcatReq) | |||
| @doc "Task Data Statistics" | |||
| @handler tasksNumHandler | |||
| get /core/tasks/num (tasksNumReq) returns (tasksNumResp) | |||
| @doc "获取hashcat" | |||
| @handler getHashcatHandler | |||
| get /core/getHashcat/:crackTaskId (getHashcatHandlerReq) returns (getHashcatHandlerResp) | |||
| @doc "Task Count Statistics" | |||
| @handler tasksNumHandler | |||
| get /core/tasks/num (tasksNumReq) returns (tasksNumResp) | |||
| @doc "Hashcat Crack Progress" | |||
| @handler crackProgressHandler | |||
| get /core/crack/progress returns (crackProgressResp) | |||
| @doc "Resource Center Information" | |||
| @handler resourceCenterInfoHandler | |||
| get /core/center/resource/:participantId (resourceCenterInfoReq) returns (resourceCenterInfoResp) | |||
| } | |||
| // hpc secondary interfaces | |||
| @@ -466,7 +483,7 @@ service pcm { | |||
| @doc "查询节点详情" | |||
| @handler ShowNodeDetailsHandler | |||
| get /vm/showNodeDetails (ShowNodeDetailsReq) returns (ShowNodeDetailsResp) | |||
| get /vm/showNodeDetails (ShowNodeDetailsReq) returns (ShowNodeDetailsResp) | |||
| } | |||
| // storage-compute linkage interfaces | |||
| @@ -1,18 +1,97 @@ | |||
| NacosConfig: | |||
| DataId: pcm-core-api.yaml | |||
| Group: DEFAULT_GROUP | |||
| ServerConfigs: | |||
| # - IpAddr: 127.0.0.1 | |||
| # Port: 8848 | |||
| # - IpAddr: 10.101.15.7 | |||
| # Port: 8848 | |||
| - IpAddr: 119.45.100.73 | |||
| Port: 8848 | |||
| ClientConfig: | |||
| NamespaceId: tzwang | |||
| # NamespaceId: test | |||
| TimeoutMs: 5000 | |||
| NotLoadCacheAtStart: true | |||
| LogDir: | |||
| CacheDir: | |||
| LogLevel: debug | |||
| Name: pcm.core.api | |||
| Host: 0.0.0.0 | |||
| Port: 8999 | |||
| Timeout: 50000 | |||
| DB: | |||
| DataSource: root:uJpLd6u-J?HC1@(10.206.0.7:3306)/pcm?parseTime=true&loc=Local | |||
| Redis: | |||
| Host: 10.206.0.7:6379 | |||
| Pass: redisPW123 | |||
| Cache: | |||
| - Host: 10.206.0.7:6379 | |||
| Pass: redisPW123 | |||
| # k8s rpc | |||
| K8sNativeConf: | |||
| # target: nacos://10.206.0.12:8848/pcm.kubenative.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| Endpoints: | |||
| - pcm-participant-kubernetes-service:2003 | |||
| NonBlock: true | |||
| #rpc | |||
| THRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.th.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| #rpc | |||
| ModelArtsRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.modelarts.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| #rpc | |||
| ModelArtsImgRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.modelarts.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| #rpc | |||
| ACRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.ac.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| Timeout: 20000 | |||
| #rpc | |||
| CephRpcConf: | |||
| # target: nacos://10.206.0.12:8848/pcm.ceph.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| Endpoints: | |||
| - pcm-participant-ceph-service:2008 | |||
| NonBlock: true | |||
| Timeout: 50000 | |||
| OctopusRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.octopus.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| Timeout: 20000 | |||
| OpenstackRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.openstack.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| Timeout: 20000 | |||
| # core rpc | |||
| PcmCoreRpcConf: | |||
| target: nacos://10.206.0.12:8848/pcm.core.rpc?timeout=30s&namespaceid=test&groupname=DEFAULT_GROUP&appName=pcm.core.api | |||
| # Endpoints: | |||
| # - 127.0.0.1:8888 | |||
| NonBlock: true | |||
| Timeout: 20000 | |||
| JccScheduleUrl: http://jcce-schedule-service:8082 | |||
| MinioConf: | |||
| Secret: minio_xnu122@_ | |||
| AccessKey: minioadmin | |||
| Endpoint: http://121.89.220.60:9000 | |||
| RegistryConf: | |||
| Username: jointcloudNudt | |||
| Password: Nudt@123 | |||
| SnowflakeConf: | |||
| MachineId: 1 | |||
| @@ -16,7 +16,6 @@ package config | |||
| import ( | |||
| "github.com/zeromicro/go-zero/core/logx" | |||
| "github.com/zeromicro/go-zero/core/stores/cache" | |||
| "github.com/zeromicro/go-zero/core/stores/redis" | |||
| "github.com/zeromicro/go-zero/rest" | |||
| "github.com/zeromicro/go-zero/zrpc" | |||
| @@ -28,7 +27,6 @@ type Config struct { | |||
| DataSource string | |||
| } | |||
| Redis redis.RedisConf | |||
| Cache cache.CacheConf | |||
| LogConf logx.LogConf | |||
| K8sNativeConf zrpc.RpcClientConf | |||
| ACRpcConf zrpc.RpcClientConf | |||
| @@ -39,7 +37,6 @@ type Config struct { | |||
| OpenstackRpcConf zrpc.RpcClientConf | |||
| OctopusRpcConf zrpc.RpcClientConf | |||
| PcmCoreRpcConf zrpc.RpcClientConf | |||
| NexusUrl string | |||
| JccScheduleUrl string | |||
| MinioConf struct { | |||
| Secret string | |||
| @@ -0,0 +1,17 @@ | |||
| package core | |||
| import ( | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/repository/result" | |||
| "net/http" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/logic/core" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| ) | |||
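| // GET /core/crack/progress takes no parameters, so the handler skips request parsing and calls the logic layer directly | |||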
| func CrackProgressHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { | |||
| return func(w http.ResponseWriter, r *http.Request) { | |||
| l := core.NewCrackProgressLogic(r.Context(), svcCtx) | |||
| resp, err := l.CrackProgress() | |||
| result.HttpResult(r, w, resp, err) | |||
| } | |||
| } | |||
| @@ -0,0 +1,25 @@ | |||
| package core | |||
| import ( | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/repository/result" | |||
| "net/http" | |||
| "github.com/zeromicro/go-zero/rest/httpx" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/logic/core" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/types" | |||
| ) | |||
| func ResourceCenterInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { | |||
| return func(w http.ResponseWriter, r *http.Request) { | |||
| var req types.ResourceCenterInfoReq | |||
| if err := httpx.Parse(r, &req); err != nil { | |||
| httpx.ErrorCtx(r.Context(), w, err) | |||
| return | |||
| } | |||
| l := core.NewResourceCenterInfoLogic(r.Context(), svcCtx) | |||
| resp, err := l.ResourceCenterInfo(&req) | |||
| result.HttpResult(r, w, resp, err) | |||
| } | |||
| } | |||
| @@ -0,0 +1,25 @@ | |||
| package core | |||
| import ( | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/repository/result" | |||
| "net/http" | |||
| "github.com/zeromicro/go-zero/rest/httpx" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/logic/core" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/types" | |||
| ) | |||
| func TasksNumHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { | |||
| return func(w http.ResponseWriter, r *http.Request) { | |||
| var req types.TasksNumReq | |||
| if err := httpx.Parse(r, &req); err != nil { | |||
| httpx.ErrorCtx(r.Context(), w, err) | |||
| return | |||
| } | |||
| l := core.NewTasksNumLogic(r.Context(), svcCtx) | |||
| resp, err := l.TasksNum(&req) | |||
| result.HttpResult(r, w, resp, err) | |||
| } | |||
| } | |||
| @@ -125,6 +125,21 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) { | |||
| Path: "/core/getHashcat/:crackTaskId", | |||
| Handler: core.GetHashcatHandler(serverCtx), | |||
| }, | |||
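| // routes for the new statistics, crack-progress and resource-center endpoints | |||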
| { | |||
| Method: http.MethodGet, | |||
| Path: "/core/tasks/num", | |||
| Handler: core.TasksNumHandler(serverCtx), | |||
| }, | |||
| { | |||
| Method: http.MethodGet, | |||
| Path: "/core/crack/progress", | |||
| Handler: core.CrackProgressHandler(serverCtx), | |||
| }, | |||
| { | |||
| Method: http.MethodGet, | |||
| Path: "/core/center/resource/:participantId", | |||
| Handler: core.ResourceCenterInfoHandler(serverCtx), | |||
| }, | |||
| }, | |||
| rest.WithPrefix("/pcm/v1"), | |||
| ) | |||
| @@ -0,0 +1,52 @@ | |||
| package core | |||
| import ( | |||
| "context" | |||
| "strings" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/types" | |||
| "github.com/zeromicro/go-zero/core/logx" | |||
| ) | |||
| type CrackProgressLogic struct { | |||
| logx.Logger | |||
| ctx context.Context | |||
| svcCtx *svc.ServiceContext | |||
| } | |||
| func NewCrackProgressLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CrackProgressLogic { | |||
| return &CrackProgressLogic{ | |||
| Logger: logx.WithContext(ctx), | |||
| ctx: ctx, | |||
| svcCtx: svcCtx, | |||
| } | |||
| } | |||
| func (l *CrackProgressLogic) CrackProgress() (resp *types.CrackProgressResp, err error) { | |||
| result := types.CrackProgressResp{} | |||
| var crackProgressList []*types.CrackProgress | |||
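| // take the latest t_hashcat row (max id) per crack task, joining cloud to resolve the task name | |||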
| tx := l.svcCtx.DbEngin.Raw("SELECT h.id, h.speed, h.crack_progress as progress,c.`name` \nFROM t_hashcat h \nJOIN ( \n SELECT crack_task_id, MAX(id) as max_id \n FROM t_hashcat \n GROUP BY crack_task_id \n) AS sub_query ON h.crack_task_id = sub_query.crack_task_id AND h.id = sub_query.max_id \nJOIN cloud c ON h.crack_task_id = c.`name`").Scan(&crackProgressList) | |||
| if tx.Error != nil { | |||
| return nil, tx.Error | |||
| } | |||
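| // hashcat reports speed as "<rate>@<device>" and progress as "<current>/<total> (<percent>%)"; split those into separate fields (assumed formats, no bounds checks) | |||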
| for _, crackProgress := range crackProgressList { | |||
| if len(crackProgress.Speed) != 0 { | |||
| str1 := strings.Split(crackProgress.Speed, "@") | |||
| crackProgress.Speed = str1[0] | |||
| } | |||
| if len(crackProgress.Progress) != 0 { | |||
| str1 := strings.Split(crackProgress.Progress, "/") | |||
| crackProgress.Current = str1[0] | |||
| str2 := strings.Split(str1[1], " (") | |||
| crackProgress.Total = str2[0] | |||
| str3 := strings.Split(str2[1], "%") | |||
| crackProgress.Progress = str3[0] | |||
| } | |||
| result.CrackProgressList = append(result.CrackProgressList, *crackProgress) | |||
| } | |||
| return &result, nil | |||
| } | |||
| @@ -39,12 +39,12 @@ func NewGetHashcatLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetHas | |||
| func (l *GetHashcatLogic) GetHashcat(req *types.GetHashcatHandlerReq) (resp *types.GetHashcatHandlerResp, err error) { | |||
| var hashcatList []*models.THashcat | |||
| tx := l.svcCtx.DbEngin.Where("crack_task_id = ?", req.CrackTaskId).Find(&hashcatList) | |||
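| // only the most recent record for this crack task is returned now, instead of the full history | |||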
| var hashcat models.THashcat | |||
| tx := l.svcCtx.DbEngin.Where("crack_task_id = ?", req.CrackTaskId).Order("id desc").Limit(1).Find(&hashcat) | |||
| if tx.Error != nil { | |||
| return nil, tx.Error | |||
| } | |||
| result := types.GetHashcatHandlerResp{} | |||
| utils.Convert(hashcatList, &result.HashCatList) | |||
| utils.Convert(hashcat, &result) | |||
| return &result, nil | |||
| } | |||
| @@ -0,0 +1,37 @@ | |||
| package core | |||
| import ( | |||
| "context" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/types" | |||
| "github.com/zeromicro/go-zero/core/logx" | |||
| ) | |||
| type ResourceCenterInfoLogic struct { | |||
| logx.Logger | |||
| ctx context.Context | |||
| svcCtx *svc.ServiceContext | |||
| } | |||
| func NewResourceCenterInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ResourceCenterInfoLogic { | |||
| return &ResourceCenterInfoLogic{ | |||
| Logger: logx.WithContext(ctx), | |||
| ctx: ctx, | |||
| svcCtx: svcCtx, | |||
| } | |||
| } | |||
| func (l *ResourceCenterInfoLogic) ResourceCenterInfo(req *types.ResourceCenterInfoReq) (resp *types.ResourceCenterInfoResp, err error) { | |||
| resp = &types.ResourceCenterInfoResp{} | |||
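| // running-task count and task names are scoped to this participant; totals and usage come from the latest sc_node_avail_info snapshot | |||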
| l.svcCtx.DbEngin.Raw("select count(t.id) from task t,cloud c where c.task_id = t.id and c.participant_id = ? and t.status = ?", req.ParticipantId, "Running").Scan(&resp.RunningTasksNum) | |||
| l.svcCtx.DbEngin.Raw("select t.name from task t,cloud c where c.task_id = t.id and c.participant_id = ?", req.ParticipantId).Scan(&resp.TasksName) | |||
| l.svcCtx.DbEngin.Raw("select SUm(cpu_total) as cpu,sum(mem_total) as memory,SUM(disk_total) as storage FROM sc_node_avail_info where participant_id = ? GROUP BY created_time ORDER BY created_time desc LIMIT 1", req.ParticipantId).Scan(&resp.ResourceTotal) | |||
| l.svcCtx.DbEngin.Raw("select IFNULL(SUM(cpu_usable)/COUNT(*),0)as cpu,IFNULL(sum(mem_avail)/SUM(mem_total),0) as memory,IFNULL(sum(disk_avail)/SUM(disk_total),0) as storage FROM sc_node_avail_info where cpu_total != 0 and participant_id = ? GROUP BY created_time ORDER BY created_time desc LIMIT 1", req.ParticipantId).Scan(&resp.ResourceUsage) | |||
| return | |||
| } | |||
| @@ -38,10 +38,10 @@ func NewSaveHashcatLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SaveH | |||
| } | |||
| func (l *SaveHashcatLogic) SaveHashcat(req *types.SaveHashcatReq) error { | |||
| var hashcat models.THashcat | |||
| utils.Convert(req, &hashcat) | |||
| hashcat.Id = utils.GenSnowflakeID() | |||
| tx := l.svcCtx.DbEngin.Save(hashcat) | |||
| var hashCat models.THashcat | |||
| utils.Convert(req, &hashCat) | |||
| hashCat.Id = utils.GenSnowflakeID() | |||
| tx := l.svcCtx.DbEngin.Save(hashCat) | |||
| if tx.Error != nil { | |||
| return tx.Error | |||
| } | |||
| @@ -0,0 +1,32 @@ | |||
| package core | |||
| import ( | |||
| "context" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/types" | |||
| "github.com/zeromicro/go-zero/core/logx" | |||
| ) | |||
| type TasksNumLogic struct { | |||
| logx.Logger | |||
| ctx context.Context | |||
| svcCtx *svc.ServiceContext | |||
| } | |||
| func NewTasksNumLogic(ctx context.Context, svcCtx *svc.ServiceContext) *TasksNumLogic { | |||
| return &TasksNumLogic{ | |||
| Logger: logx.WithContext(ctx), | |||
| ctx: ctx, | |||
| svcCtx: svcCtx, | |||
| } | |||
| } | |||
| func (l *TasksNumLogic) TasksNum(req *types.TasksNumReq) (resp *types.TasksNumResp, err error) { | |||
| resp = &types.TasksNumResp{} | |||
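| // simple aggregate counts over the task table | |||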
| l.svcCtx.DbEngin.Raw("select count(*) from task ").Scan(&resp.TotalNum) | |||
| l.svcCtx.DbEngin.Raw("select count(*) from task where status ='Running' ").Scan(&resp.RunningNum) | |||
| l.svcCtx.DbEngin.Raw("select count(*) from task where status ='Completed' ").Scan(&resp.CompletedNum) | |||
| return resp, nil | |||
| } | |||
| @@ -77,11 +77,15 @@ func NewServiceContext(c config.Config) *ServiceContext { | |||
| downloader := s3manager.NewDownloader(session) | |||
| uploader := s3manager.NewUploader(session) | |||
| // initialize Gorm | |||
| dbEngin, _ := gorm.Open(mysql.Open(c.DB.DataSource), &gorm.Config{ | |||
| dbEngin, err := gorm.Open(mysql.Open(c.DB.DataSource), &gorm.Config{ | |||
| NamingStrategy: schema.NamingStrategy{ | |||
| SingularTable: true, // use singular table names; with this option, the table for `User` is `t_user` | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| logx.Error("gorm init error: ", err.Error()) | |||
| panic(err) | |||
| } | |||
| dockerClient, err := client.NewClientWithOpts() | |||
| if err != nil { | |||
| logx.Error(err.Error()) | |||
| @@ -1,6 +1,38 @@ | |||
| // Code generated by goctl. DO NOT EDIT. | |||
| package types | |||
| type ResourceCenterInfoReq struct { | |||
| ParticipantId int64 `path:"participantId"` | |||
| } | |||
| type ResourceCenterInfoResp struct { | |||
| TasksName []string `json:"tasksName"` | |||
| RunningTasksNum int64 `json:"runningTasksNum"` | |||
| ResourceUsage ResourceUsage `json:"resourceUsage"` | |||
| ResourceTotal ResourceTotal `json:"resourceTotal"` | |||
| } | |||
| type ResourceUsage struct { | |||
| Cpu float64 `json:"cpu"` | |||
| Memory float64 `json:"memory"` | |||
| Storage float64 `json:"storage"` | |||
| } | |||
| type ResourceTotal struct { | |||
| Cpu int64 `json:"cpu"` | |||
| Memory int64 `json:"memory"` | |||
| Storage int64 `json:"storage"` | |||
| } | |||
| type TasksNumReq struct { | |||
| } | |||
| type TasksNumResp struct { | |||
| TotalNum int64 `json:"totalNum"` | |||
| RunningNum int64 `json:"runningNum"` | |||
| CompletedNum int64 `json:"completedNum"` | |||
| } | |||
| type SubmitJobReq struct { | |||
| SlurmVersion string `json:"slurmVersion"` | |||
| Apptype string `json:"apptype,optional"` | |||
| @@ -485,6 +517,18 @@ type NodeAsset struct { | |||
| ParticipantId int64 `json:"ParticipantId"` // cluster dynamic info id | |||
| } | |||
| type CrackProgressResp struct { | |||
| CrackProgressList []CrackProgress `json:"crackProgressList"` | |||
| } | |||
| type CrackProgress struct { | |||
| Name string `json:"name"` | |||
| Progress string `json:"progress"` | |||
| Current string `json:"current"` | |||
| Total string `json:"total"` | |||
| Speed string `json:"speed"` | |||
| } | |||
| type SaveHashcatReq struct { | |||
| CrackTaskId string `json:"crackTaskId"` // task id | |||
| CrackContainerId string `json:"crackContainerId"` // container id | |||
| @@ -493,8 +537,21 @@ type SaveHashcatReq struct { | |||
| CrackEstimatedTime string `json:"crackEstimatedTime"` // estimated time | |||
| CrackProgress string `json:"crackProgress"` // progress | |||
| CrackResult string `json:"crackResult"` // result | |||
| Started string `json:"started"` // start time | |||
| Stopped string `json:"stopped"` // stop time | |||
| Started string `json:"started,optional"` // start time | |||
| Stopped string `json:"stopped,optional"` // stop time | |||
| KernelFeature string `json:"kernelFeature"` | |||
| HashMode string `json:"hashMode"` | |||
| Rejected string `json:"rejected"` | |||
| Session string `json:"session"` | |||
| HashTarget string `json:"hashTarget"` | |||
| Speed string `json:"speed"` | |||
| Candidates string `json:"candidates"` | |||
| RestorePoint string `json:"restorePoint"` | |||
| Recovered string `json:"recovered"` | |||
| GuessQueue string `json:"guessQueue"` | |||
| CandidateEngine string `json:"candidateEngine"` | |||
| GuessMask string `json:"guessMask"` | |||
| RestoreSub string `json:"restoreSub"` | |||
| } | |||
| type GetHashcatHandlerReq struct { | |||
| @@ -502,10 +559,6 @@ type GetHashcatHandlerReq struct { | |||
| } | |||
| type GetHashcatHandlerResp struct { | |||
| HashCatList []HashCat `json:"hashCatList"` | |||
| } | |||
| type HashCat struct { | |||
| CrackTaskId string `json:"crackTaskId"` // task id | |||
| CrackContainerId string `json:"crackContainerId"` // container id | |||
| CrackStatus string `json:"crackStatus"` // status | |||
| @@ -515,6 +568,19 @@ type HashCat struct { | |||
| CrackResult string `json:"crackResult"` // result | |||
| Started string `json:"started"` // start time | |||
| Stopped string `json:"stopped"` // stop time | |||
| KernelFeature string `json:"kernelFeature"` | |||
| HashMode string `json:"hashMode"` | |||
| Rejected string `json:"rejected"` | |||
| Session string `json:"session"` | |||
| HashTarget string `json:"hashTarget"` | |||
| Speed string `json:"speed"` | |||
| Candidates string `json:"candidates"` | |||
| RestorePoint string `json:"restorePoint"` | |||
| Recovered string `json:"recovered"` | |||
| GuessQueue string `json:"guessQueue"` | |||
| CandidateEngine string `json:"candidateEngine"` | |||
| GuessMask string `json:"guessMask"` | |||
| RestoreSub string `json:"restoreSub"` | |||
| } | |||
| type ParticipantListResp struct { | |||
| @@ -16,10 +16,6 @@ spec: | |||
| labels: | |||
| k8s-app: pcm-coordinator-api | |||
| spec: | |||
| hostAliases: | |||
| - hostnames: | |||
| - nacos.jcce.dev | |||
| ip: nacos_host | |||
| imagePullSecrets: | |||
| - name: secret_name | |||
| containers: | |||
| @@ -26,7 +26,6 @@ import ( | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/handler" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/mqs" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/api/internal/svc" | |||
| commonConfig "gitlink.org.cn/jcce-pcm/utils/nacos" | |||
| ) | |||
| var configFile = flag.String("f", "api/etc/pcm.yaml", "the config file") | |||
| @@ -34,26 +33,8 @@ var configFile = flag.String("f", "api/etc/pcm.yaml", "the config file") | |||
| func main() { | |||
| flag.Parse() | |||
| var bootstrapConfig commonConfig.BootstrapConfig | |||
| conf.MustLoad(*configFile, &bootstrapConfig) | |||
| // parse the service configuration | |||
| var c config.Config | |||
| nacosConfig := bootstrapConfig.NacosConfig | |||
| serviceConfigContent := nacosConfig.InitConfig(func(data string) { | |||
| err := conf.LoadFromYamlBytes([]byte(data), &c) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| }) | |||
| err := conf.LoadFromYamlBytes([]byte(serviceConfigContent), &c) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| // register with nacos | |||
| nacosConfig.DiscoveryRest(&c.RestConf) | |||
| conf.MustLoad(*configFile, &c) | |||
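| // the nacos bootstrap step is gone: the full service config is now loaded from the local yaml file | |||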
| serviceGroup := service.NewServiceGroup() | |||
| defer serviceGroup.Stop() | |||
| @@ -36,8 +36,8 @@ require ( | |||
| google.golang.org/grpc v1.59.0 | |||
| google.golang.org/protobuf v1.31.0 | |||
| gorm.io/datatypes v1.2.0 | |||
| gorm.io/driver/mysql v1.5.2 | |||
| gorm.io/gorm v1.25.5 | |||
| gorm.io/driver/mysql v1.5.0 | |||
| gorm.io/gorm v1.25.4 | |||
| k8s.io/apimachinery v0.28.3 | |||
| k8s.io/client-go v0.28.3 | |||
| sigs.k8s.io/yaml v1.4.0 | |||
| @@ -1729,14 +1729,14 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | |||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |||
| gorm.io/datatypes v1.2.0 h1:5YT+eokWdIxhJgWHdrb2zYUimyk0+TaFth+7a0ybzco= | |||
| gorm.io/datatypes v1.2.0/go.mod h1:o1dh0ZvjIjhH/bngTpypG6lVRJ5chTBxE09FH/71k04= | |||
| gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs= | |||
| gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8= | |||
| gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM= | |||
| gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo= | |||
| gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U= | |||
| gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= | |||
| gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0= | |||
| gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= | |||
| gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= | |||
| gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= | |||
| gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= | |||
| gorm.io/gorm v1.25.4 h1:iyNd8fNAe8W9dvtlgeRI5zSVZPsq3OpcTu37cYcpCmw= | |||
| gorm.io/gorm v1.25.4/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= | |||
| gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= | |||
| honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | |||
| honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | |||
| @@ -1,41 +0,0 @@ | |||
| /* | |||
| Copyright (c) [2023] [pcm] | |||
| [pcm-coordinator] is licensed under Mulan PSL v2. | |||
| You can use this software according to the terms and conditions of the Mulan PSL v2. | |||
| You may obtain a copy of Mulan PSL v2 at: | |||
| http://license.coscl.org.cn/MulanPSL2 | |||
| THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, | |||
| EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, | |||
| MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. | |||
| See the Mulan PSL v2 for more details. | |||
| */ | |||
| package models | |||
| import ( | |||
| "github.com/zeromicro/go-zero/core/stores/cache" | |||
| "github.com/zeromicro/go-zero/core/stores/sqlx" | |||
| ) | |||
| var _ ScQueuePhyInfoModel = (*customScQueuePhyInfoModel)(nil) | |||
| type ( | |||
| // ScQueuePhyInfoModel is an interface to be customized, add more methods here, | |||
| // and implement the added methods in customScQueuePhyInfoModel. | |||
| ScQueuePhyInfoModel interface { | |||
| scQueuePhyInfoModel | |||
| } | |||
| customScQueuePhyInfoModel struct { | |||
| *defaultScQueuePhyInfoModel | |||
| } | |||
| ) | |||
| // NewScQueuePhyInfoModel returns a model for the database table. | |||
| func NewScQueuePhyInfoModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) ScQueuePhyInfoModel { | |||
| return &customScQueuePhyInfoModel{ | |||
| defaultScQueuePhyInfoModel: newScQueuePhyInfoModel(conn, c, opts...), | |||
| } | |||
| } | |||
| @@ -5,14 +5,10 @@ package models | |||
| import ( | |||
| "context" | |||
| "database/sql" | |||
| "fmt" | |||
| "strings" | |||
| "time" | |||
| "github.com/zeromicro/go-zero/core/stores/builder" | |||
| "github.com/zeromicro/go-zero/core/stores/cache" | |||
| "github.com/zeromicro/go-zero/core/stores/sqlc" | |||
| "github.com/zeromicro/go-zero/core/stores/sqlx" | |||
| "github.com/zeromicro/go-zero/core/stringx" | |||
| ) | |||
| @@ -39,102 +35,27 @@ type ( | |||
| } | |||
| ScQueuePhyInfo struct { | |||
| Id int64 `db:"id"` // id | |||
| AclHosts string `db:"aclHosts"` // available nodes, comma-separated | |||
| QueueId string `db:"queue_id"` // queue id | |||
| Text string `db:"text"` // queue text | |||
| QueueName string `db:"queue_name"` // queue name | |||
| QueNodes string `db:"que_nodes"` // total number of nodes in the queue | |||
| QueMinNodect string `db:"que_min_nodect"` // minimum number of nodes in the queue | |||
| QueMaxNgpus string `db:"que_max_ngpus"` // maximum GPU cards in the queue | |||
| QueMaxPpn string `db:"que_max_ppn"` // maximum CPU cores for a job using this queue | |||
| QueChargeRate string `db:"que_charge_rate"` // charge rate | |||
| QueMaxNcpus string `db:"que_max_ncpus"` // maximum cores available to a user | |||
| QueMaxNdcus string `db:"que_max_ndcus"` // total DCU cards in the queue | |||
| QueMinNcpus string `db:"que_min_ncpus"` // minimum CPU cores in the queue | |||
| QueFreeNodes string `db:"que_free_nodes"` // number of free nodes in the queue | |||
| QueMaxNodect string `db:"que_max_nodect"` // maximum nodes per job in the queue | |||
| QueMaxGpuPN string `db:"que_max_gpu_PN"` // maximum GPU cards per single job | |||
| QueMaxWalltime string `db:"que_max_walltime"` // maximum walltime | |||
| QueMaxDcuPN string `db:"que_max_dcu_PN"` // maximum DCU cards per single job | |||
| ParticipantId int64 `db:"participant_id"` // cluster dynamic info id | |||
| DeletedFlag int64 `db:"deleted_flag"` // deleted flag | |||
| QueNcpus string `db:"que_ncpus"` | |||
| QueFreeNcpus string `db:"que_free_ncpus"` | |||
| CreatedBy sql.NullInt64 `db:"created_by"` // created by | |||
| CreatedTime time.Time `db:"created_time"` // created time | |||
| UpdatedBy sql.NullInt64 `db:"updated_by"` // updated by | |||
| UpdatedTime sql.NullTime `db:"updated_time"` // updated time | |||
| Id int64 `db:"id"` // id | |||
| AclHosts string `db:"aclHosts"` // available nodes, comma-separated | |||
| QueueId string `db:"queue_id"` // queue id | |||
| Text string `db:"text"` // queue text | |||
| QueueName string `db:"queue_name"` // queue name | |||
| QueNodes string `db:"que_nodes"` // total number of nodes in the queue | |||
| QueMinNodect string `db:"que_min_nodect"` // minimum number of nodes in the queue | |||
| QueMaxNgpus string `db:"que_max_ngpus"` // maximum GPU cards in the queue | |||
| QueMaxPpn string `db:"que_max_ppn"` // maximum CPU cores for a job using this queue | |||
| QueChargeRate string `db:"que_charge_rate"` // charge rate | |||
| QueMaxNcpus string `db:"que_max_ncpus"` // maximum cores available to a user | |||
| QueMaxNdcus string `db:"que_max_ndcus"` // total DCU cards in the queue | |||
| QueMinNcpus string `db:"que_min_ncpus"` // minimum CPU cores in the queue | |||
| QueFreeNodes string `db:"que_free_nodes"` // number of free nodes in the queue | |||
| QueMaxNodect string `db:"que_max_nodect"` // maximum nodes per job in the queue | |||
| QueMaxGpuPN string `db:"que_max_gpu_PN"` // maximum GPU cards per single job | |||
| QueMaxWalltime string `db:"que_max_walltime"` // maximum walltime | |||
| QueMaxDcuPN string `db:"que_max_dcu_PN"` // maximum DCU cards per single job | |||
| ParticipantId int64 `db:"participant_id"` // cluster dynamic info id | |||
| DeletedFlag int64 `db:"deleted_flag"` // deleted flag | |||
| QueNcpus string `db:"que_ncpus"` | |||
| QueFreeNcpus string `db:"que_free_ncpus"` | |||
| } | |||
| ) | |||
| func newScQueuePhyInfoModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) *defaultScQueuePhyInfoModel { | |||
| return &defaultScQueuePhyInfoModel{ | |||
| CachedConn: sqlc.NewConn(conn, c, opts...), | |||
| table: "`sc_queue_phy_info`", | |||
| } | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) withSession(session sqlx.Session) *defaultScQueuePhyInfoModel { | |||
| return &defaultScQueuePhyInfoModel{ | |||
| CachedConn: m.CachedConn.WithSession(session), | |||
| table: "`sc_queue_phy_info`", | |||
| } | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) Delete(ctx context.Context, id int64) error { | |||
| pcmScQueuePhyInfoIdKey := fmt.Sprintf("%s%v", cachePcmScQueuePhyInfoIdPrefix, id) | |||
| _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { | |||
| query := fmt.Sprintf("delete from %s where `id` = ?", m.table) | |||
| return conn.ExecCtx(ctx, query, id) | |||
| }, pcmScQueuePhyInfoIdKey) | |||
| return err | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) FindOne(ctx context.Context, id int64) (*ScQueuePhyInfo, error) { | |||
| pcmScQueuePhyInfoIdKey := fmt.Sprintf("%s%v", cachePcmScQueuePhyInfoIdPrefix, id) | |||
| var resp ScQueuePhyInfo | |||
| err := m.QueryRowCtx(ctx, &resp, pcmScQueuePhyInfoIdKey, func(ctx context.Context, conn sqlx.SqlConn, v any) error { | |||
| query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", scQueuePhyInfoRows, m.table) | |||
| return conn.QueryRowCtx(ctx, v, query, id) | |||
| }) | |||
| switch err { | |||
| case nil: | |||
| return &resp, nil | |||
| case sqlc.ErrNotFound: | |||
| return nil, ErrNotFound | |||
| default: | |||
| return nil, err | |||
| } | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) Insert(ctx context.Context, data *ScQueuePhyInfo) (sql.Result, error) { | |||
| pcmScQueuePhyInfoIdKey := fmt.Sprintf("%s%v", cachePcmScQueuePhyInfoIdPrefix, data.Id) | |||
| ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { | |||
| query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, scQueuePhyInfoRowsExpectAutoSet) | |||
| return conn.ExecCtx(ctx, query, data.Id, data.AclHosts, data.QueueId, data.Text, data.QueueName, data.QueNodes, data.QueMinNodect, data.QueMaxNgpus, data.QueMaxPpn, data.QueChargeRate, data.QueMaxNcpus, data.QueMaxNdcus, data.QueMinNcpus, data.QueFreeNodes, data.QueMaxNodect, data.QueMaxGpuPN, data.QueMaxWalltime, data.QueMaxDcuPN, data.ParticipantId, data.DeletedFlag, data.CreatedBy, data.CreatedTime, data.UpdatedBy, data.UpdatedTime) | |||
| }, pcmScQueuePhyInfoIdKey) | |||
| return ret, err | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) Update(ctx context.Context, data *ScQueuePhyInfo) error { | |||
| pcmScQueuePhyInfoIdKey := fmt.Sprintf("%s%v", cachePcmScQueuePhyInfoIdPrefix, data.Id) | |||
| _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { | |||
| query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, scQueuePhyInfoRowsWithPlaceHolder) | |||
| return conn.ExecCtx(ctx, query, data.AclHosts, data.QueueId, data.Text, data.QueueName, data.QueNodes, data.QueMinNodect, data.QueMaxNgpus, data.QueMaxPpn, data.QueChargeRate, data.QueMaxNcpus, data.QueMaxNdcus, data.QueMinNcpus, data.QueFreeNodes, data.QueMaxNodect, data.QueMaxGpuPN, data.QueMaxWalltime, data.QueMaxDcuPN, data.ParticipantId, data.DeletedFlag, data.CreatedBy, data.CreatedTime, data.UpdatedBy, data.UpdatedTime, data.Id) | |||
| }, pcmScQueuePhyInfoIdKey) | |||
| return err | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) formatPrimary(primary any) string { | |||
| return fmt.Sprintf("%s%v", cachePcmScQueuePhyInfoIdPrefix, primary) | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary any) error { | |||
| query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", scQueuePhyInfoRows, m.table) | |||
| return conn.QueryRowCtx(ctx, v, query, primary) | |||
| } | |||
| func (m *defaultScQueuePhyInfoModel) tableName() string { | |||
| return m.table | |||
| } | |||
| @@ -26,6 +26,19 @@ type ( | |||
| CreatedBy int64 `db:"created_by"` // created by | |||
| UpdatedBy int64 `db:"updated_by"` // updated by | |||
| DeletedFlag int64 `db:"deleted_flag"` // deleted flag (0 = no, 1 = yes) | |||
| KernelFeature string `db:"kernel_feature"` | |||
| HashMode string `db:"hash_mode"` | |||
| Rejected string `db:"rejected"` | |||
| Session string `db:"session"` | |||
| HashTarget string `db:"hash_target"` | |||
| Speed string `db:"speed"` | |||
| Candidates string `db:"candidates"` | |||
| RestorePoint string `db:"restore_point"` | |||
| Recovered string `db:"recovered"` | |||
| GuessQueue string `db:"guess_queue"` | |||
| CandidateEngine string `db:"candidate_engine"` | |||
| GuessMask string `db:"guess_mask"` | |||
| RestoreSub string `db:"restore_sub"` | |||
| } | |||
| ) | |||
| @@ -1,15 +1,22 @@ | |||
| FROM golang:1.20.2-alpine3.17 AS builder | |||
| WORKDIR /app | |||
| FROM golang:1.21.2-alpine3.18 AS builder | |||
| LABEL stage=gobuilder | |||
| ENV CGO_ENABLED 0 | |||
| ENV GOARCH amd64 | |||
| ENV GOPROXY https://goproxy.cn,direct | |||
| RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.sjtug.sjtu.edu.cn/g' /etc/apk/repositories && \ | |||
| apk update --no-cache && apk add --no-cache tzdata | |||
| WORKDIR /app | |||
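| # copy go.mod/go.sum and download modules first so this layer is cached when only source changes | |||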
| ADD go.mod . | |||
| ADD go.sum . | |||
| RUN go mod download | |||
| COPY . . | |||
| COPY rpc/etc/ /app/ | |||
| RUN go mod download && go build -o pcm-coordinator-rpc /app/rpc/pcmcore.go | |||
| RUN go build -o pcm-coordinator-rpc /app/rpc/pcmcore.go | |||
| FROM alpine:3.16.2 | |||
| @@ -1,15 +1,14 @@ | |||
| NacosConfig: | |||
| DataId: pcm-core-rpc.yaml | |||
| Group: DEFAULT_GROUP | |||
| ServerConfigs: | |||
| # - IpAddr: 127.0.0.1 | |||
| # Port: 8848 | |||
| - IpAddr: nacos.jcce.dev | |||
| Port: 8848 | |||
| ClientConfig: | |||
| NamespaceId: test | |||
| TimeoutMs: 5000 | |||
| NotLoadCacheAtStart: true | |||
| LogDir: | |||
| CacheDir: | |||
| LogLevel: info | |||
| Name: pcm.core.rpc | |||
| ListenOn: 0.0.0.0:2004 | |||
| Timeout: 15000 # 15s: response timeout for the rpc service; requests that have not returned within 15s are terminated | |||
| DB: | |||
| DataSource: root:uJpLd6u-J?HC1@(10.206.0.7:3306)/pcm?parseTime=true&loc=Local | |||
| SnowflakeConf: | |||
| MachineId: 1 | |||
| RedisConf: | |||
| Host: 10.206.0.7:6379 | |||
| Pass: redisPW123 | |||
| @@ -1,79 +0,0 @@ | |||
| /* | |||
| Copyright (c) [2023] [pcm] | |||
| [pcm-coordinator] is licensed under Mulan PSL v2. | |||
| You can use this software according to the terms and conditions of the Mulan PSL v2. | |||
| You may obtain a copy of Mulan PSL v2 at: | |||
| http://license.coscl.org.cn/MulanPSL2 | |||
| THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, | |||
| EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, | |||
| MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. | |||
| See the Mulan PSL v2 for more details. | |||
| */ | |||
| package cron | |||
| import ( | |||
| "github.com/zeromicro/go-zero/core/logx" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/constants" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/models" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/svc" | |||
| "gorm.io/gorm" | |||
| "strings" | |||
| ) | |||
| func InitCron(svc *svc.ServiceContext) { | |||
| svc.Cron.Start() | |||
| svc.Cron.AddFunc("*/5 * * * * ?", func() { | |||
| var tasks []models.Task | |||
| svc.DbEngin.Where("status not in ?", []string{constants.Deleted, constants.Succeeded, constants.Completed, constants.Failed}).Find(&tasks) | |||
| for _, task := range tasks { | |||
| var allStatus string | |||
| tx := svc.DbEngin.Raw("SELECT CONCAT_WS(',',GROUP_CONCAT(DISTINCT h.status) ,GROUP_CONCAT(DISTINCT a.status) ,GROUP_CONCAT(DISTINCT c.status))as status from task t left join hpc h on t.id = h.task_id left join cloud c on t.id = c.task_id left join ai a on t.id = a.task_id where t.id = ?", task.Id).Scan(&allStatus) | |||
| if tx.Error != nil { | |||
| logx.Error(tx.Error) | |||
| } | |||
| // if all sub-statuses agree, propagate that status to the main task | |||
| statusArray := strings.Split(allStatus, ",") | |||
| if len(removeRepeatedElement(statusArray)) == 1 { | |||
| updateTask(svc.DbEngin, &task, statusArray[0]) | |||
| continue | |||
| } | |||
| // if any subtask failed, the main task fails | |||
| if strings.Contains(allStatus, constants.Failed) { | |||
| updateTask(svc.DbEngin, &task, constants.Failed) | |||
| continue | |||
| } | |||
| if strings.Contains(allStatus, constants.Running) { | |||
| updateTask(svc.DbEngin, &task, constants.Running) | |||
| } | |||
| } | |||
| }) | |||
| } | |||
| func updateTask(dbEngin *gorm.DB, task *models.Task, status string) { | |||
| if task.Status != status { | |||
| task.Status = status | |||
| dbEngin.Updates(&task) | |||
| } | |||
| } | |||
| func removeRepeatedElement(arr []string) (newArr []string) { | |||
| newArr = make([]string, 0) | |||
| for i := 0; i < len(arr); i++ { | |||
| repeat := false | |||
| for j := i + 1; j < len(arr); j++ { | |||
| if arr[i] == arr[j] { | |||
| repeat = true | |||
| break | |||
| } | |||
| } | |||
| if !repeat { | |||
| newArr = append(newArr, arr[i]) | |||
| } | |||
| } | |||
| return | |||
| } | |||
| @@ -88,7 +88,6 @@ func (l *RegisterParticipantLogic) RegisterParticipant(in *pcmCore.ParticipantPh | |||
| for _, info := range in.QueueInfo { | |||
| queueInfo := &models2.ScQueuePhyInfo{} | |||
| utils.Convert(info, queueInfo) | |||
| queueInfo.CreatedTime = time.Now() | |||
| queueInfo.ParticipantId = participantInfo.Id | |||
| // check whether a queue with this name and ParticipantId already exists | |||
| queueErr := db.Where(&models2.ScQueuePhyInfo{QueueName: queueInfo.QueueName, ParticipantId: in.ParticipantId}).Take(queueInfo) | |||
| @@ -44,7 +44,11 @@ func (l *InfoListLogic) InfoList(in *pcmCore.InfoListReq) (*pcmCore.InfoListResp | |||
| result := pcmCore.InfoListResp{} | |||
| // look up the participant (P-side) type | |||
| var kind string | |||
| l.svcCtx.DbEngin.Raw("select type as kind from sc_participant_phy_info where id = ?", in.ParticipantId).Scan(&kind) | |||
| tx := l.svcCtx.DbEngin.Raw("select type as kind from sc_participant_phy_info where id = ?", in.ParticipantId).Scan(&kind) | |||
| if tx.Error != nil { | |||
| logx.Error(tx.Error) | |||
| return nil, tx.Error | |||
| } | |||
| // query the data lists across cloud, AI and HPC | |||
| switch kind { | |||
| case constants.HPC: | |||
| @@ -66,6 +70,7 @@ func (l *InfoListLogic) InfoList(in *pcmCore.InfoListReq) (*pcmCore.InfoListResp | |||
| func findModelList(participantId int64, dbEngin *gorm.DB, data interface{}) error { | |||
| tx := dbEngin.Where("participant_id = ? AND status NOT IN ?", participantId, []string{"Deleted", "Succeeded", "Completed", "Failed"}).Find(data) | |||
| if tx.Error != nil { | |||
| logx.Error(tx.Error) | |||
| return tx.Error | |||
| } | |||
| return nil | |||
| @@ -17,8 +17,11 @@ package pcmcorelogic | |||
| import ( | |||
| "context" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/constants" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/pkg/models" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/pcmCore" | |||
| "gorm.io/gorm" | |||
| "strings" | |||
| "time" | |||
| "github.com/zeromicro/go-zero/core/logx" | |||
| @@ -54,20 +57,79 @@ func (l *SyncInfoLogic) SyncInfo(in *pcmCore.SyncInfoReq) (*pcmCore.SyncInfoResp | |||
| switch kind { | |||
| case constants.CLOUD: | |||
| for _, cloudInfo := range in.CloudInfoList { | |||
| l.svcCtx.DbEngin.Exec("update cloud set status = ?,start_time = ?,running_time = ?,result = ? where participant_id = ? and task_id = ? and namespace = ? and name = ?", | |||
| cloudInfo.Status, cloudInfo.StartTime, cloudInfo.RunningTime, cloudInfo.Result, in.ParticipantId, cloudInfo.TaskId, cloudInfo.Namespace, cloudInfo.Name) | |||
| tx := l.svcCtx.DbEngin.Exec("update cloud set status = ?,start_time = ?,result = ? where participant_id = ? and id = ?", | |||
| cloudInfo.Status, cloudInfo.StartTime, cloudInfo.Result, in.ParticipantId, cloudInfo.Id) | |||
| if tx.Error != nil { | |||
| logx.Error(tx.Error) | |||
| } | |||
| syncTask(l.svcCtx.DbEngin, cloudInfo.TaskId) | |||
| } | |||
| case constants.HPC: | |||
| for _, hpcInfo := range in.HpcInfoList { | |||
| l.svcCtx.DbEngin.Exec("update hpc set status = ?,start_time = ?,running_time = ?,job_id = ? where participant_id = ? and task_id = ? and name = ?", | |||
| tx := l.svcCtx.DbEngin.Exec("update hpc set status = ?,start_time = ?,job_id = ? where participant_id = ? and task_id = ? and name = ?", | |||
| hpcInfo.Status, hpcInfo.StartTime, hpcInfo.JobId, in.ParticipantId, hpcInfo.TaskId, hpcInfo.Name) | |||
| if tx.Error != nil { | |||
| logx.Error(tx.Error) | |||
| } | |||
| syncTask(l.svcCtx.DbEngin, hpcInfo.TaskId) | |||
| } | |||
| case constants.AI: | |||
| for _, aiInfo := range in.AiInfoList { | |||
| l.svcCtx.DbEngin.Exec("update ai set status = ?,start_time = ?,running_time = ?,project_id = ?,job_id = ? where participant_id = ? and task_id = ? and name = ?", | |||
| aiInfo.Status, aiInfo.StartTime, aiInfo.RunningTime, aiInfo.ProjectId, aiInfo.JobId, in.ParticipantId, aiInfo.TaskId, aiInfo.Name) | |||
| l.svcCtx.DbEngin.Exec("update ai set status = ?,start_time = ?,project_id = ?,job_id = ? where participant_id = ? and task_id = ? and name = ?", | |||
| aiInfo.Status, aiInfo.StartTime, aiInfo.ProjectId, aiInfo.JobId, in.ParticipantId, aiInfo.TaskId, aiInfo.Name) | |||
| syncTask(l.svcCtx.DbEngin, aiInfo.TaskId) | |||
| } | |||
| } | |||
| return &pcmCore.SyncInfoResp{}, nil | |||
| } | |||
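| // syncTask recomputes the parent task status from its hpc/cloud/ai subtask statuses, mirroring the logic dropped from the cron job | |||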
| func syncTask(gorm *gorm.DB, taskId int64) { | |||
| var allStatus string | |||
| tx := gorm.Raw("SELECT CONCAT_WS(',',GROUP_CONCAT(DISTINCT h.status) ,GROUP_CONCAT(DISTINCT a.status) ,GROUP_CONCAT(DISTINCT c.status))as status from task t left join hpc h on t.id = h.task_id left join cloud c on t.id = c.task_id left join ai a on t.id = a.task_id where t.id = ?", taskId).Scan(&allStatus) | |||
| if tx.Error != nil { | |||
| logx.Error(tx.Error) | |||
| } | |||
| // if all sub-statuses agree, propagate that status to the main task | |||
| statusArray := strings.Split(allStatus, ",") | |||
| if len(removeRepeatedElement(statusArray)) == 1 { | |||
| updateTask(gorm, taskId, statusArray[0]) | |||
| } | |||
| // if any subtask failed, the main task fails | |||
| if strings.Contains(allStatus, constants.Failed) { | |||
| updateTask(gorm, taskId, constants.Failed) | |||
| } | |||
| if strings.Contains(allStatus, constants.Running) { | |||
| updateTask(gorm, taskId, constants.Running) | |||
| } | |||
| } | |||
| func updateTask(gorm *gorm.DB, taskId int64, status string) { | |||
| var task models.Task | |||
| gorm.Where("id = ? ", taskId).Find(&task) | |||
| if task.Status != status { | |||
| task.Status = status | |||
| gorm.Updates(&task) | |||
| } | |||
| } | |||
| func removeRepeatedElement(arr []string) (newArr []string) { | |||
| newArr = make([]string, 0) | |||
| for i := 0; i < len(arr); i++ { | |||
| repeat := false | |||
| for j := i + 1; j < len(arr); j++ { | |||
| if arr[i] == arr[j] { | |||
| repeat = true | |||
| break | |||
| } | |||
| } | |||
| if !repeat { | |||
| newArr = append(newArr, arr[i]) | |||
| } | |||
| } | |||
| return | |||
| } | |||
| @@ -36,14 +36,18 @@ type ServiceContext struct { | |||
| func NewServiceContext(c config.Config) *ServiceContext { | |||
| // initialize Gorm | |||
| dbEngin, _ := gorm.Open(mysql.Open(c.DB.DataSource), &gorm.Config{ | |||
| dbEngin, err := gorm.Open(mysql.Open(c.DB.DataSource), &gorm.Config{ | |||
| NamingStrategy: schema.NamingStrategy{ | |||
| SingularTable: true, // use singular table names; with this option, the table for `User` is `t_user` | |||
| }, | |||
| Logger: logger.Default.LogMode(logger.Warn), | |||
| }) | |||
| if err != nil { | |||
| logx.Error("gorm初始化错误:", err.Error()) | |||
| panic(err) | |||
| } | |||
| // add snowflake ID support | |||
| err := utils.InitSnowflake(c.SnowflakeConf.MachineId) | |||
| err = utils.InitSnowflake(c.SnowflakeConf.MachineId) | |||
| if err != nil { | |||
| logx.Errorf("InitSnowflake err: ", err) | |||
| panic("InitSnowflake err") | |||
| @@ -42,18 +42,19 @@ message CloudInfo { | |||
| int64 runningTime = 9; | |||
| string result = 10; | |||
| string yamlString = 11; | |||
| int64 id = 12; | |||
| } | |||
| message VmInfo { | |||
| int64 participantId = 1; | |||
| int64 taskId = 2; | |||
| string name = 3; | |||
| string flavor_ref =4; | |||
| string image_ref =5; | |||
| string network_uuid=6; | |||
| string block_uuid=7; | |||
| string source_type=8; | |||
| bool delete_on_termination=9; | |||
| string flavor_ref = 4; | |||
| string image_ref = 5; | |||
| string network_uuid = 6; | |||
| string block_uuid = 7; | |||
| string source_type = 8; | |||
| bool delete_on_termination = 9; | |||
| string state = 10; | |||
| } | |||
| @@ -16,10 +16,6 @@ spec: | |||
| labels: | |||
| k8s-app: pcm-coordinator-rpc | |||
| spec: | |||
| hostAliases: | |||
| - hostnames: | |||
| - nacos.jcce.dev | |||
| ip: nacos_host | |||
| imagePullSecrets: | |||
| - name: secret_name | |||
| containers: | |||
| @@ -323,6 +323,7 @@ type CloudInfo struct { | |||
| RunningTime int64 `protobuf:"varint,9,opt,name=runningTime,proto3" json:"runningTime,omitempty"` | |||
| Result string `protobuf:"bytes,10,opt,name=result,proto3" json:"result,omitempty"` | |||
| YamlString string `protobuf:"bytes,11,opt,name=yamlString,proto3" json:"yamlString,omitempty"` | |||
| Id int64 `protobuf:"varint,12,opt,name=id,proto3" json:"id,omitempty"` | |||
| } | |||
| func (x *CloudInfo) Reset() { | |||
| @@ -434,6 +435,13 @@ func (x *CloudInfo) GetYamlString() string { | |||
| return "" | |||
| } | |||
| func (x *CloudInfo) GetId() int64 { | |||
| if x != nil { | |||
| return x.Id | |||
| } | |||
| return 0 | |||
| } | |||
| type VmInfo struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| @@ -2542,7 +2550,7 @@ var file_pcmCore_proto_rawDesc = []byte{ | |||
| 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x74, | |||
| 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, | |||
| 0x09, 0x52, 0x0d, 0x69, 0x74, 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, | |||
| 0x22, 0xbb, 0x02, 0x0a, 0x09, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, | |||
| 0x22, 0xcb, 0x02, 0x0a, 0x09, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, | |||
| 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, | |||
| 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, | |||
| 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, | |||
| @@ -2561,7 +2569,8 @@ var file_pcmCore_proto_rawDesc = []byte{ | |||
| 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, | |||
| 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1e, | |||
| 0x0a, 0x0a, 0x79, 0x61, 0x6d, 0x6c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, | |||
| 0x28, 0x09, 0x52, 0x0a, 0x79, 0x61, 0x6d, 0x6c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0xc3, | |||
| 0x28, 0x09, 0x52, 0x0a, 0x79, 0x61, 0x6d, 0x6c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x0e, | |||
| 0x0a, 0x02, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0xc3, | |||
| 0x02, 0x0a, 0x06, 0x56, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x24, 0x0a, 0x0d, 0x70, 0x61, 0x72, | |||
| 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, | |||
| 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, | |||
| @@ -21,13 +21,11 @@ import ( | |||
| "github.com/zeromicro/go-zero/core/service" | |||
| "github.com/zeromicro/go-zero/zrpc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/config" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/cron" | |||
| participantserviceServer "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/server/participantservice" | |||
| pcmcoreServer "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/server/pcmcore" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/internal/svc" | |||
| "gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/pcmCore" | |||
| "gitlink.org.cn/jcce-pcm/utils/interceptor/rpcserver" | |||
| commonConfig "gitlink.org.cn/jcce-pcm/utils/nacos" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/reflection" | |||
| ) | |||
| @@ -38,28 +36,8 @@ func main() { | |||
| flag.Parse() | |||
| var bootstrapConfig commonConfig.BootstrapConfig | |||
| conf.MustLoad(*configFile, &bootstrapConfig) | |||
| // parse the service configuration | |||
| var c config.Config | |||
| nacosConfig := bootstrapConfig.NacosConfig | |||
| serviceConfigContent := nacosConfig.InitConfig(func(data string) { | |||
| err := conf.LoadFromYamlBytes([]byte(data), &c) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| }) | |||
| err := conf.LoadFromYamlBytes([]byte(serviceConfigContent), &c) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| // start log component | |||
| logx.MustSetup(c.LogConf) | |||
| // register with nacos | |||
| nacosConfig.Discovery(&c.RpcServerConf) | |||
| conf.MustLoad(*configFile, &c) | |||
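| // as in the api service, config now comes straight from the local yaml file instead of nacos | |||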
| ctx := svc.NewServiceContext(c) | |||
| s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) { | |||
| @@ -74,8 +52,6 @@ func main() { | |||
| s.AddUnaryInterceptors(rpcserver.LoggerInterceptor) | |||
| defer s.Stop() | |||
| // initialize cron jobs | |||
| cron.InitCron(ctx) | |||
| logx.Infof("Starting rpc server at %s...\n", c.ListenOn) | |||
| s.Start() | |||
| } | |||