You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

dockerci.sh 3.4 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980
  1. #!/bin/bash
  2. MODELSCOPE_CACHE_DIR_IN_CONTAINER=/modelscope_cache
  3. CODE_DIR=$PWD
  4. CODE_DIR_IN_CONTAINER=/Maas-lib
  5. echo "$USER"
  6. gpus='7 6 5 4 3 2 1 0'
  7. cpu_sets='0-7 8-15 16-23 24-30 31-37 38-44 45-51 52-58'
  8. cpu_sets_arr=($cpu_sets)
  9. is_get_file_lock=false
  10. # export RUN_CASE_COMMAND='python tests/run.py --run_config tests/run_config.yaml'
  11. CI_COMMAND=${CI_COMMAND:-bash .dev_scripts/ci_container_test.sh $RUN_CASE_BASE_COMMAND}
  12. echo "ci command: $CI_COMMAND"
  13. for gpu in $gpus
  14. do
  15. exec {lock_fd}>"/tmp/gpu$gpu" || exit 1
  16. flock -n "$lock_fd" || { echo "WARN: gpu $gpu is in use!" >&2; continue; }
  17. echo "get gpu lock $gpu"
  18. CONTAINER_NAME="modelscope-ci-$gpu"
  19. let is_get_file_lock=true
  20. # pull image if there are update
  21. docker pull ${IMAGE_NAME}:${IMAGE_VERSION}
  22. if [ "$MODELSCOPE_SDK_DEBUG" == "True" ]; then
  23. docker run --rm --name $CONTAINER_NAME --shm-size=16gb \
  24. --cpuset-cpus=${cpu_sets_arr[$gpu]} \
  25. --gpus="device=$gpu" \
  26. -v $CODE_DIR:$CODE_DIR_IN_CONTAINER \
  27. -v $MODELSCOPE_CACHE:$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  28. -v $MODELSCOPE_HOME_CACHE/$gpu:/root \
  29. -v /home/admin/pre-commit:/home/admin/pre-commit \
  30. -e CI_TEST=True \
  31. -e TEST_LEVEL=$TEST_LEVEL \
  32. -e MODELSCOPE_CACHE=$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  33. -e MODELSCOPE_DOMAIN=$MODELSCOPE_DOMAIN \
  34. -e MODELSCOPE_SDK_DEBUG=True \
  35. -e HUB_DATASET_ENDPOINT=$HUB_DATASET_ENDPOINT \
  36. -e TEST_ACCESS_TOKEN_CITEST=$TEST_ACCESS_TOKEN_CITEST \
  37. -e TEST_ACCESS_TOKEN_SDKDEV=$TEST_ACCESS_TOKEN_SDKDEV \
  38. -e TEST_LEVEL=$TEST_LEVEL \
  39. -e MODELSCOPE_ENVIRONMENT='ci' \
  40. -e TEST_UPLOAD_MS_TOKEN=$TEST_UPLOAD_MS_TOKEN \
  41. -e MODEL_TAG_URL=$MODEL_TAG_URL \
  42. --workdir=$CODE_DIR_IN_CONTAINER \
  43. --net host \
  44. ${IMAGE_NAME}:${IMAGE_VERSION} \
  45. $CI_COMMAND
  46. else
  47. docker run --rm --name $CONTAINER_NAME --shm-size=16gb \
  48. --cpuset-cpus=${cpu_sets_arr[$gpu]} \
  49. --gpus="device=$gpu" \
  50. -v $CODE_DIR:$CODE_DIR_IN_CONTAINER \
  51. -v $MODELSCOPE_CACHE:$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  52. -v $MODELSCOPE_HOME_CACHE/$gpu:/root \
  53. -v /home/admin/pre-commit:/home/admin/pre-commit \
  54. -e CI_TEST=True \
  55. -e TEST_LEVEL=$TEST_LEVEL \
  56. -e MODELSCOPE_CACHE=$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  57. -e MODELSCOPE_DOMAIN=$MODELSCOPE_DOMAIN \
  58. -e HUB_DATASET_ENDPOINT=$HUB_DATASET_ENDPOINT \
  59. -e TEST_ACCESS_TOKEN_CITEST=$TEST_ACCESS_TOKEN_CITEST \
  60. -e TEST_ACCESS_TOKEN_SDKDEV=$TEST_ACCESS_TOKEN_SDKDEV \
  61. -e TEST_LEVEL=$TEST_LEVEL \
  62. -e MODELSCOPE_ENVIRONMENT='ci' \
  63. -e TEST_UPLOAD_MS_TOKEN=$TEST_UPLOAD_MS_TOKEN \
  64. -e MODEL_TAG_URL=$MODEL_TAG_URL \
  65. --workdir=$CODE_DIR_IN_CONTAINER \
  66. --net host \
  67. ${IMAGE_NAME}:${IMAGE_VERSION} \
  68. $CI_COMMAND
  69. fi
  70. if [ $? -ne 0 ]; then
  71. echo "Running test case failed, please check the log!"
  72. exit -1
  73. fi
  74. break
  75. done
  76. if [ "$is_get_file_lock" = false ] ; then
  77. echo 'No free GPU!'
  78. exit 1
  79. fi