
harvester.go
package service

import (
    "context"
    "fmt"
    "strconv"
    "strings"
    "time"

    pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
    "code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
    pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
    "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
    harvClient "github.com/harvester/harvester/pkg/generated/clientset/versioned"
    "github.com/longhorn/longhorn-manager/util"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    k8s "k8s.io/client-go/kubernetes"
    kubirtv1 "kubevirt.io/client-go/api/v1"
)
const (
    prefix                   = "harvesterhci.io"
    vmAnnotationPVC          = prefix + "/volumeClaimTemplates"
    vmAnnotationNetworkIps   = "network.harvesterhci.io/ips"
    defaultCloudInitUserData = "#cloud-config\npackage_update: true\npackages:\n - qemu-guest-agent\nruncmd:\n - - systemctl\n - enable\n - '--now'\n - qemu-guest-agent\n"
)

type Config struct {
    Host  string
    Token string
    Port  int
}

type HarVMer struct {
    k8sCli   *k8s.Clientset
    harvCli  *harvClient.Clientset
    region   tenanter.Region
    tenanter tenanter.Tenanter
}
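// newHarvesterClient builds the Kubernetes and Harvester clientsets for the
// given tenant and wraps them in a HarVMer.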
func newHarvesterClient(tenant tenanter.Tenanter) (Ecser, error) {
    var (
        k8sclient       *k8s.Clientset
        harvesterClient *harvClient.Clientset
        err             error
    )
    switch t := tenant.(type) {
    case *tenanter.AccessKeyTenant:
        k8sclient, err = GetKubernetesClient(t.GetUrl(), t.GetToken())
        if err != nil {
            return nil, err
        }
        harvesterClient, err = GetHarvesterClient(t.GetUrl(), t.GetToken())
        if err != nil {
            return nil, err
        }
    default:
    }
    if err != nil {
        return nil, errors.Wrap(err, "init harvester client error")
    }
    return &HarVMer{
        k8sCli:   k8sclient,
        harvCli:  harvesterClient,
        region:   nil,
        tenanter: tenant,
    }, nil
}
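// CreateEcs creates req.Amount virtual machines from the given image: for each
// VM it builds a PVC-backed root disk annotation, a VMI template, and a
// cloud-init secret owned by the created VM.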
func (h *HarVMer) CreateEcs(ctx context.Context, req *pbecs.CreateEcsReq) (resp *pbecs.CreateEcsResp, err error) {
    var (
        vmTemplate *kubirtv1.VirtualMachineInstanceTemplateSpec
        vmImage    *v1beta1.VirtualMachineImage
    )
    if req.ImageId != "" {
        vmImage, err = h.harvCli.HarvesterhciV1beta1().VirtualMachineImages(req.GetNamespace()).Get(context.TODO(), req.ImageId, k8smetav1.GetOptions{})
        if err != nil {
            return nil, errors.Wrap(err, "get pcm_vm image error")
        }
    } else {
        return nil, errors.New("no image ID given, can not create VM")
    }
    storageClassName := vmImage.Status.StorageClassName
    vmNameBase := req.InstanceName
    vmLabels := map[string]string{
        prefix + "/creator": "harvester",
    }
    // note: vmiLabels aliases vmLabels, so the per-VM name labels set below
    // are applied to the VM object as well as the VMI template
    vmiLabels := vmLabels
    _amount := req.Amount
    if _amount == 0 {
        return nil, fmt.Errorf("VM count provided is 0, no VM will be created")
    }
    repAmount := 0
    InstanceIds := make([]string, 0)
    for i := 1; i <= int(_amount); i++ {
        var vmName string
        randomID := util.RandomID()
        if _amount > 1 {
            vmName = vmNameBase + "-" + fmt.Sprint(i)
        } else {
            vmName = vmNameBase
        }
        secretRandomID := vmNameBase + "-" + randomID
        vmiLabels[prefix+"/vmName"] = vmName
        vmiLabels[prefix+"/vmNamePrefix"] = vmNameBase
        diskRandomID := util.RandomID()
        pvcName := vmName + "-disk-0-" + diskRandomID
        pvcAnnotation := "[{\"metadata\":{\"name\":\"" + pvcName + "\",\"annotations\":{\"harvesterhci.io/imageId\":\"" + req.GetNamespace() + "/" + req.GetImageId() + "\"}},\"spec\":{\"accessModes\":[\"ReadWriteMany\"],\"resources\":{\"requests\":{\"storage\":\"" + req.GetDiskSize() + "\"}},\"volumeMode\":\"Block\",\"storageClassName\":\"" + storageClassName + "\"}}]"
        vmTemplate, err = buildVMTemplate(int(req.GetCpu()), req.GetMemory(), req.GetSshKey(), h.harvCli, pvcName, vmiLabels, vmNameBase, secretRandomID)
        if err != nil {
            return nil, errors.Wrap(err, "build VM template failed")
        }
        vm := &kubirtv1.VirtualMachine{
            ObjectMeta: k8smetav1.ObjectMeta{
                Name:      vmName,
                Namespace: req.GetNamespace(),
                Annotations: map[string]string{
                    vmAnnotationPVC:        pvcAnnotation,
                    vmAnnotationNetworkIps: "[]",
                },
                Labels: vmLabels,
            },
            Spec: kubirtv1.VirtualMachineSpec{
                Running:  NewTrue(),
                Template: vmTemplate,
            },
        }
        createdVM, err1 := h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).Create(context.TODO(), vm, k8smetav1.CreateOptions{})
        if err1 != nil {
            return nil, errors.Wrap(err1, "VM create failed")
        }
        var sshKey *v1beta1.KeyPair
        cloudInitSSHSection := ""
        if req.GetSshKey() != "" {
            sshArr := strings.Split(req.GetSshKey(), "/")
            if len(sshArr) != 2 {
                return nil, errors.New("sshKeyName should be in format namespace/name")
            }
            sshKey, err = h.harvCli.HarvesterhciV1beta1().KeyPairs(sshArr[0]).Get(context.TODO(), sshArr[1], k8smetav1.GetOptions{})
            if err != nil {
                return nil, errors.Wrap(err, "error during getting keypair from Harvester")
            }
            cloudInitSSHSection = "\nssh_authorized_keys:\n - >-\n" + sshKey.Spec.PublicKey + "\n"
            logrus.Debugf("SSH Key Name %s given does exist!", req.GetSshKey())
        }
        if req.UserDataTemplate == "" {
            req.UserDataTemplate = defaultCloudInitUserData
        }
        // Create the cloud-init secret for the VM
        if _, secreterr := createCloudInitDataFromSecret(h.k8sCli, vmName, createdVM.ObjectMeta.UID, secretRandomID, req.Namespace, req.UserDataTemplate+cloudInitSSHSection, req.NetworkDataTemplate); secreterr != nil {
            logrus.Errorf("Create secret failed, %s", secreterr)
            return nil, errors.Wrap(secreterr, "Create cloud init data from secret failed")
        }
        InstanceIds = append(InstanceIds, string(createdVM.UID))
        repAmount++
    }
    isFinished := false
    if int32(repAmount) == req.Amount {
        isFinished = true
    }
    return &pbecs.CreateEcsResp{
        Provider:       pbecs.CloudProvider_harvester,
        AccountName:    h.tenanter.AccountName(),
        InstanceIdSets: InstanceIds,
        Finished:       isFinished,
    }, nil
}
// buildVMTemplate creates a *kubirtv1.VirtualMachineInstanceTemplateSpec from the request parameters and some computed values
func buildVMTemplate(vCpu int, memory, sshKeyName string, c *harvClient.Clientset,
    pvcName string, vmiLabels map[string]string, vmName string, secretName string) (vmTemplate *kubirtv1.VirtualMachineInstanceTemplateSpec, err error) {
    vmTemplate = nil
    _memory := resource.MustParse(memory)
    if sshKeyName != "" {
        sshArr := strings.Split(sshKeyName, "/")
        if len(sshArr) != 2 {
            return nil, errors.New("sshKeyName should be in format namespace/name")
        }
        _, keyerr := c.HarvesterhciV1beta1().KeyPairs(sshArr[0]).Get(context.TODO(), sshArr[1], k8smetav1.GetOptions{})
        if keyerr != nil {
            return nil, errors.Wrap(keyerr, "error during getting keypair from Harvester")
        }
        logrus.Debugf("SSH Key Name %s given does exist!", sshKeyName)
    }
    vmTemplate = &kubirtv1.VirtualMachineInstanceTemplateSpec{
        ObjectMeta: k8smetav1.ObjectMeta{
            Annotations: vmiAnnotations(pvcName, sshKeyName),
            Labels:      vmiLabels,
        },
        Spec: kubirtv1.VirtualMachineInstanceSpec{
            Hostname: vmName,
            Networks: []kubirtv1.Network{
                {
                    Name: "default",
                    NetworkSource: kubirtv1.NetworkSource{
                        Multus: &kubirtv1.MultusNetwork{
                            NetworkName: "default/service-network",
                        },
                    },
                },
            },
            Volumes: []kubirtv1.Volume{
                {
                    Name: "disk-0",
                    VolumeSource: kubirtv1.VolumeSource{
                        PersistentVolumeClaim: &kubirtv1.PersistentVolumeClaimVolumeSource{
                            PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{
                                ClaimName: pvcName,
                            },
                        },
                    },
                },
                {
                    Name: "cloudinitdisk",
                    VolumeSource: kubirtv1.VolumeSource{
                        CloudInitNoCloud: &kubirtv1.CloudInitNoCloudSource{
                            UserDataSecretRef:    &v1.LocalObjectReference{Name: secretName},
                            NetworkDataSecretRef: &v1.LocalObjectReference{Name: secretName},
                        },
                    },
                },
            },
            Domain: kubirtv1.DomainSpec{
                CPU: &kubirtv1.CPU{
                    Cores:   uint32(vCpu),
                    Sockets: uint32(1),
                    Threads: uint32(1),
                },
                Memory: &kubirtv1.Memory{
                    Guest: &_memory,
                },
                Devices: kubirtv1.Devices{
                    Inputs: []kubirtv1.Input{
                        {
                            Bus:  "usb",
                            Type: "tablet",
                            Name: "tablet",
                        },
                    },
                    Interfaces: []kubirtv1.Interface{
                        {
                            Name:                   "default",
                            Model:                  "virtio",
                            InterfaceBindingMethod: kubirtv1.DefaultBridgeNetworkInterface().InterfaceBindingMethod,
                        },
                    },
                    Disks: []kubirtv1.Disk{
                        {
                            BootOrder: PointerToUint(1),
                            Name:      "disk-0",
                            DiskDevice: kubirtv1.DiskDevice{
                                Disk: &kubirtv1.DiskTarget{
                                    Bus: "virtio",
                                },
                            },
                        },
                        {
                            Name: "cloudinitdisk",
                            DiskDevice: kubirtv1.DiskDevice{
                                Disk: &kubirtv1.DiskTarget{
                                    Bus: "virtio",
                                },
                            },
                        },
                    },
                },
                Resources: kubirtv1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        "cpu":    resource.MustParse(strconv.Itoa(vCpu)),
                        "memory": resource.MustParse(memory),
                    },
                    Requests: v1.ResourceList{
                        "memory": resource.MustParse(memory),
                    },
                },
            },
            // prefer spreading VMs with the same name prefix across nodes
            Affinity: &v1.Affinity{
                PodAntiAffinity: &v1.PodAntiAffinity{
                    PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
                        {
                            Weight: int32(1),
                            PodAffinityTerm: v1.PodAffinityTerm{
                                TopologyKey: "kubernetes.io/hostname",
                                LabelSelector: &k8smetav1.LabelSelector{
                                    MatchLabels: map[string]string{
                                        prefix + "/vmNamePrefix": vmName,
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
    }
    return
}
// vmiAnnotations generates a map of strings to be injected as annotations from a PVC name and an SSH key name
func vmiAnnotations(pvcName string, sshKeyName string) map[string]string {
    sshKey := "[]"
    if sshKeyName != "" {
        sshKey = "[\"" + sshKeyName + "\"]"
    }
    return map[string]string{
        prefix + "/diskNames": "[\"" + pvcName + "\"]",
        prefix + "/sshNames":  sshKey,
    }
}
// createCloudInitDataFromSecret creates a Secret holding the cloud-init user data and network data for a VM
func createCloudInitDataFromSecret(c *k8s.Clientset, vmName string, uid types.UID, secretName, namespace, userData, networkData string) (secret *v1.Secret, err error) {
    toCreate := &v1.Secret{
        TypeMeta: k8smetav1.TypeMeta{
            Kind:       "Secret",
            APIVersion: "v1",
        },
        ObjectMeta: k8smetav1.ObjectMeta{
            Name:      secretName,
            Namespace: namespace,
            Labels: map[string]string{
                prefix + "/cloud-init-template": "harvester",
            },
            // owning the secret by the VM lets Kubernetes garbage-collect it
            // when the VM is deleted
            OwnerReferences: []k8smetav1.OwnerReference{
                {
                    APIVersion: "kubevirt.io/v1",
                    Kind:       "VirtualMachine",
                    Name:       vmName,
                    UID:        uid,
                },
            },
        },
        Type: "secret",
        Data: map[string][]byte{
            "userdata":    []byte(userData),
            "networkdata": []byte(networkData),
        },
    }
    resp, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), toCreate, k8smetav1.CreateOptions{})
    if err != nil {
        return nil, errors.Wrap(err, "error creating cloud-init secret")
    }
    return resp, nil
}
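// DeleteEcs deletes the named VM and, if req.DiskName lists volume names
// (comma-separated), the backing PVCs of those volumes.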
func (h *HarVMer) DeleteEcs(ctx context.Context, req *pbecs.DeleteEcsReq) (resp *pbecs.DeleteEcsResp, err error) {
    if req.Namespace == "" {
        return nil, errors.New("namespace is required")
    }
    vm, err := h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).Get(context.TODO(), req.GetInstanceName(), k8smetav1.GetOptions{})
    if err != nil {
        return nil, errors.Wrap(err, "VM does not exist")
    }
    err = h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).Delete(context.TODO(), req.GetInstanceName(), k8smetav1.DeleteOptions{})
    if err != nil {
        logrus.Errorf("delete pcm_vm error: %v", err)
        return nil, errors.Wrap(err, "VM could not be deleted")
    }
    // delete the pcm_vm disks (PVCs) named in the request
    if req.DiskName != "" {
        for _, delName := range strings.Split(req.DiskName, ",") {
            for _, disk := range vm.Spec.Template.Spec.Volumes {
                if disk.Name == delName && disk.VolumeSource.PersistentVolumeClaim != nil {
                    ClaimName := disk.VolumeSource.PersistentVolumeClaim.ClaimName
                    err1 := h.k8sCli.CoreV1().PersistentVolumeClaims(req.GetNamespace()).Delete(context.TODO(), ClaimName, k8smetav1.DeleteOptions{})
                    if err1 != nil {
                        logrus.Errorf("delete pvc failed, err: %v", err1)
                        return nil, errors.Wrap(err1, "VM disk could not be deleted")
                    }
                }
            }
        }
    }
    return &pbecs.DeleteEcsResp{
        Provider:    pbecs.CloudProvider_harvester,
        AccountName: h.tenanter.AccountName(),
    }, nil
}
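// UpdateEcs updates the CPU, memory, and description of an existing VM and
// optionally restarts it so the changes take effect.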
func (h *HarVMer) UpdateEcs(ctx context.Context, req *pbecs.UpdateEcsReq) (resp *pbecs.UpdateEcsResp, err error) {
    // look up the VM to update
    vm, err := h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).Get(context.TODO(), req.GetInstanceName(), k8smetav1.GetOptions{})
    if err != nil {
        return nil, errors.Wrap(err, "VM does not exist")
    }
    if req.Cpu != "" && req.Memory != "" {
        vm.Spec.Template.Spec.Domain.Resources = kubirtv1.ResourceRequirements{
            Limits: v1.ResourceList{
                "cpu":    resource.MustParse(req.Cpu),
                "memory": resource.MustParse(req.Memory),
            },
        }
    }
    vm.ObjectMeta.Annotations["field.cattle.io/description"] = req.Description
    if req.Cpu != "" {
        j, err := strconv.ParseUint(req.Cpu, 10, 32)
        if err != nil {
            return nil, errors.Wrap(err, "cpu is not a number")
        }
        vm.Spec.Template.Spec.Domain.CPU = &kubirtv1.CPU{
            Cores:   uint32(j),
            Sockets: uint32(1),
            Threads: uint32(1),
        }
    }
    if req.Memory != "" {
        _memory := resource.MustParse(req.Memory)
        vm.Spec.Template.Spec.Domain.Memory = &kubirtv1.Memory{
            Guest: &_memory,
        }
    }
    // update the VM
    _, err = h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).Update(context.TODO(), vm, k8smetav1.UpdateOptions{})
    if err != nil {
        return nil, errors.Wrap(err, "VM update failed")
    }
    if req.IsRestart {
        // restart the VM so the new resources take effect
        err = restartVmByName(h.harvCli, req.GetNamespace(), req.GetInstanceName())
        if err != nil {
            return nil, errors.Wrap(err, "VM restart failed")
        }
    }
    return &pbecs.UpdateEcsResp{
        Provider:    pbecs.CloudProvider_harvester,
        AccountName: h.tenanter.AccountName(),
    }, nil
}
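// ListDetail lists the VMs in the requested namespace and joins them with
// their VMIs to report node, IP, and resource details.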
func (h *HarVMer) ListDetail(ctx context.Context, req *pbecs.ListDetailReq) (resp *pbecs.ListDetailResp, err error) {
    vmList, err := h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).List(context.TODO(), k8smetav1.ListOptions{})
    if err != nil {
        return nil, errors.Wrap(err, "VM list failed")
    }
    vmiList, err := h.harvCli.KubevirtV1().VirtualMachineInstances(req.GetNamespace()).List(context.TODO(), k8smetav1.ListOptions{})
    if err != nil {
        return nil, errors.Wrap(err, "VMI list failed")
    }
    vmiMap := map[string]kubirtv1.VirtualMachineInstance{}
    for _, vmi := range vmiList.Items {
        vmiMap[vmi.Name] = vmi
    }
    var ecses = make([]*pbecs.EcsInstance, len(vmList.Items))
    for k, vm := range vmList.Items {
        running := *vm.Spec.Running
        var state string
        if running {
            state = "Running"
        } else {
            state = "Not Running"
        }
        IP := make([]string, 0)
        if vmiMap[vm.Name].Status.Interfaces == nil {
            IP = append(IP, "")
        } else {
            IP = append(IP, vmiMap[vm.Name].Status.Interfaces[0].IP)
        }
        ecses[k] = &pbecs.EcsInstance{
            Provider:     pbecs.CloudProvider_harvester,
            AccountName:  h.tenanter.AccountName(),
            Status:       state,
            InstanceName: vm.Name,
            Node:         vmiMap[vm.Name].Status.NodeName,
            Cpu:          vm.Spec.Template.Spec.Domain.Resources.Limits.Cpu().String(),
            Memory:       vm.Spec.Template.Spec.Domain.Resources.Limits.Memory().String(),
            PublicIps:    IP,
            CreationTime: vm.CreationTimestamp.String(),
            Description:  vm.ObjectMeta.Annotations["field.cattle.io/description"],
            Namespace:    vm.Namespace,
        }
    }
    isFinished := false
    if len(ecses) > 0 {
        isFinished = true
    }
    return &pbecs.ListDetailResp{
        Ecses:    ecses,
        Finished: isFinished,
    }, nil
}
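// ActionEcs starts, stops, or restarts the named VM according to req.ActionType.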
func (h *HarVMer) ActionEcs(ctx context.Context, req *pbecs.ActionReq) (resp *pbecs.ActionResp, err error) {
    status := ""
    switch req.GetActionType() {
    case pbecs.ActionType_start:
        err := startVmByName(h.harvCli, req.GetNamespace(), req.GetVmName())
        if err != nil {
            return nil, err
        }
        status = "Running"
    case pbecs.ActionType_stop:
        err := stopVmByName(h.harvCli, req.GetNamespace(), req.GetVmName())
        if err != nil {
            return nil, err
        }
        status = "Off"
    case pbecs.ActionType_restart:
        err := restartVmByName(h.harvCli, req.GetNamespace(), req.GetVmName())
        if err != nil {
            return nil, err
        }
        status = "Running"
    }
    return &pbecs.ActionResp{
        Provider:    pbecs.CloudProvider_harvester,
        AccountName: h.tenanter.AccountName(),
        Status:      status,
    }, nil
}
// startVmByName starts a VM by first issuing a GET using the VM name, then updating the resulting VM object
func startVmByName(c *harvClient.Clientset, namespace, vmName string) error {
    vm, err := c.KubevirtV1().VirtualMachines(namespace).Get(context.TODO(), vmName, k8smetav1.GetOptions{})
    if err != nil {
        return errors.Wrap(err, "VM not found")
    }
    *vm.Spec.Running = true
    _, err = c.KubevirtV1().VirtualMachines(namespace).Update(context.TODO(), vm, k8smetav1.UpdateOptions{})
    if err != nil {
        return errors.Wrap(err, "VM start failed")
    }
    return nil
}

// stopVmByName stops a VM by first finding it by name, then setting Spec.Running to false
func stopVmByName(c *harvClient.Clientset, namespace, vmName string) error {
    vm, err := c.KubevirtV1().VirtualMachines(namespace).Get(context.TODO(), vmName, k8smetav1.GetOptions{})
    if err != nil {
        return errors.Wrap(err, "VM not found")
    }
    *vm.Spec.Running = false
    _, err = c.KubevirtV1().VirtualMachines(namespace).Update(context.TODO(), vm, k8smetav1.UpdateOptions{})
    if err != nil {
        return errors.Wrap(err, "VM stop failed")
    }
    return nil
}

// restartVmByName restarts a VM by first finding it by name, then stopping it and starting it again
func restartVmByName(c *harvClient.Clientset, namespace, vmName string) error {
    vm, err := c.KubevirtV1().VirtualMachines(namespace).Get(context.TODO(), vmName, k8smetav1.GetOptions{})
    if err != nil {
        return errors.Wrap(err, "VM not found")
    }
    err = stopVmByName(c, namespace, vm.Name)
    if err != nil {
        return errors.Wrap(err, "VM stop failed")
    }
    // give the stop a moment to be processed before starting again
    time.Sleep(1 * time.Second)
    return startVmByName(c, namespace, vm.Name)
}
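// ListEcsImages lists the Harvester VM images available in the default namespace.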
func (h *HarVMer) ListEcsImages(ctx context.Context, req *pbecs.ListImagesReq) (resp *pbecs.ListImagesResp, err error) {
    // Harvester images are queried from the default namespace
    namespace := "default"
    images, err := h.harvCli.HarvesterhciV1beta1().VirtualMachineImages(namespace).List(ctx, k8smetav1.ListOptions{})
    if err != nil {
        return nil, err
    }
    imageList := make([]*pbecs.Image, 0)
    for _, im := range images.Items {
        image := pbecs.Image{
            Provider:    pbecs.CloudProvider(pbtenant.CloudProvider_harvester),
            Id:          im.Name,
            DisplayName: im.Spec.DisplayName,
            Namespace:   im.Namespace,
        }
        imageList = append(imageList, &image)
    }
    return &pbecs.ListImagesResp{
        Provider: pbecs.CloudProvider(pbtenant.CloudProvider_harvester),
        Images:   imageList,
    }, nil
}

PCM is positioned as a software stack over cloud, aiming to build the standards and ecosystem for heterogeneous cloud collaboration in JCC in a non-intrusive, autonomous, peer-to-peer manner.