
submit_openmp_container.go 4.4 kB

// This example builds a Singularity container (if it does not exist yet) and
// submits two OpenMP jobs through the Go Slurm bindings: one that lets
// OMP_NUM_THREADS follow the allocated CPUs, and one that oversubscribes the
// allocation with a fixed thread count. It then polls both jobs until they
// finish and prints their runtimes.
package main

import (
    "fmt"
    "os"
    "os/exec"
    "os/user"
    "path/filepath"
    "strconv"
    "time"

    "slurm"
    "slurm/jobinfo"
    "slurm/submitjob"
)

// fileExists reports whether filename exists and is a regular file.
func fileExists(filename string) bool {
    info, err := os.Stat(filename)
    if os.IsNotExist(err) {
        return false
    }
    return !info.IsDir()
}

// build_container builds a Singularity image container_name from the
// definition file file_name.
func build_container(file_name, container_name string) {
    cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build", container_name, file_name)
    fmt.Print("Now build new container\n")
    fmt.Printf("%s\n", cmd.String())
    stdoutStderr, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Printf("error in creating container %s\n", err)
    }
    fmt.Printf("%s\n", stdoutStderr)
}

func main() {
    job_desc := submit_job.Job_descriptor{}

    dir, _ := os.Getwd()
    container := filepath.Join(dir, "openmp_container.img")
    definition := filepath.Join(dir, "openmp_container.def")
    if !fileExists(container) {
        build_container(definition, container)
    }
    if !fileExists(container) {
        return
    }

    /* use Cmd to create our script */
    job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
    job_desc.Script += "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
    cmd := exec.Command("/usr/local/bin/singularity", "exec", container, "/opt/openmp_example")
    job_desc.Script += cmd.String()
    fmt.Printf("cmd %s\n", job_desc.Script)

    user, _ := user.Current()
    userid, _ := strconv.Atoi(user.Uid)
    job_desc.User_id = uint32(userid)
    groupid, _ := strconv.Atoi(user.Gid)
    job_desc.Group_id = uint32(groupid)

    job_desc.Name = "test_job"
    job_desc.Partition = "long"
    job_desc.Time_limit = uint32(60)
    job_desc.Min_nodes = uint32(1)
    job_desc.Num_tasks = uint32(1)
    job_desc.Cpus_per_task = uint16(2)
    job_desc.Std_out = "./%j-out.txt"
    job_desc.Std_err = "./%j-err.txt"
    job_desc.Work_dir = dir

    answer := submit_job.Submit_job(&job_desc)
    if answer.Error_code != 0 {
        msg := slurm.GetErrorString(answer.Error_code)
        fmt.Printf("Error: %s\n", msg)
        return
    }
    fmt.Printf("Submitted Job %d\n", answer.Job_id)

    /* Now, we submit the same job again, but with some oversubscription */
    job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
    job_desc.Script += "export OMP_NUM_THREADS=4\n"
    job_desc.Script += cmd.String()
    fmt.Printf("cmd %s\n", job_desc.Script)

    answer2 := submit_job.Submit_job(&job_desc)
    if answer2.Error_code != 0 {
        msg := slurm.GetErrorString(answer2.Error_code)
        fmt.Printf("Error: %s\n", msg)
        return
    }
    fmt.Printf("Submitted Job %d\n", answer2.Job_id)

    /* poll the first job until it is neither pending nor running */
    job_list := job_info.Get_job(answer.Job_id)
    if job_list.Error_code != 0 {
        msg := slurm.GetErrorString(job_list.Error_code)
        fmt.Printf("Error: %s\n", msg)
        return
    }
    job := job_list.Job_list[0]
    fmt.Printf("job is %s\n", job.Job_stateS)
    state := job.Job_stateS
    for state == "Pending" || state == "Running" {
        time.Sleep(2 * time.Second)
        job_list = job_info.Get_job(answer.Job_id)
        if job_list.Error_code != 0 {
            msg := slurm.GetErrorString(job_list.Error_code)
            fmt.Printf("Error: %s\n", msg)
            return
        }
        job = job_list.Job_list[0]
        state = job.Job_stateS
        fmt.Printf("job is %s\n", job.Job_stateS)
    }
    fmt.Printf("Total runtime first job %s\n", job_info.Get_job_runtime(job).String())

    /* wait for the second job */
    job_list = job_info.Get_job(answer2.Job_id)
    if job_list.Error_code != 0 {
        msg := slurm.GetErrorString(job_list.Error_code)
        fmt.Printf("Error: %s\n", msg)
        return
    }
    job = job_list.Job_list[0]
    fmt.Printf("job is %s\n", job.Job_stateS)
    state = job.Job_stateS
    for state == "Pending" || state == "Running" {
        time.Sleep(2 * time.Second)
        job_list = job_info.Get_job(answer2.Job_id)
        if job_list.Error_code != 0 {
            msg := slurm.GetErrorString(job_list.Error_code)
            fmt.Printf("Error: %s\n", msg)
            return
        }
        job = job_list.Job_list[0]
        state = job.Job_stateS
        fmt.Printf("job is %s\n", job.Job_stateS)
    }
    fmt.Printf("Total runtime second job %s\n", job_info.Get_job_runtime(job).String())
}
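The two wait loops in main are identical except for the job ID they poll. A small helper along the following lines could remove that duplication; this is only a sketch that reuses the job_info and slurm calls already present in this example, the name waitForJob is hypothetical, and the uint32 parameter type is an assumption based on how the other Slurm fields (User_id, Time_limit) are typed above.

// waitForJob is a hypothetical helper (not part of the bindings) that polls
// the given job every two seconds until it is neither "Pending" nor "Running",
// then prints its total runtime. It assumes the same imports as the example above.
func waitForJob(job_id uint32) {
    for {
        job_list := job_info.Get_job(job_id)
        if job_list.Error_code != 0 {
            fmt.Printf("Error: %s\n", slurm.GetErrorString(job_list.Error_code))
            return
        }
        job := job_list.Job_list[0]
        fmt.Printf("job is %s\n", job.Job_stateS)
        if job.Job_stateS != "Pending" && job.Job_stateS != "Running" {
            fmt.Printf("Total runtime %s\n", job_info.Get_job_runtime(job).String())
            return
        }
        time.Sleep(2 * time.Second)
    }
}

With such a helper, the tail of main would reduce to two calls, e.g. waitForJob(answer.Job_id) followed by waitForJob(answer2.Job_id).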

PCM is positioned as a software stack over the cloud, aiming to build the standards and ecosystem for heterogeneous cloud collaboration in JCC in a non-intrusive, autonomous, peer-to-peer manner.