
Dockerfile.ubuntu

ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
FROM $BASE_IMAGE
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
ENV CONDA_DIR /opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
    cp /tmp/resources/ubuntu20.04_sources.tuna /etc/apt/sources.list && \
    apt-get update && \
    apt-get install -y locales wget git vim ffmpeg libsm6 tzdata language-pack-zh-hans ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
    wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
    dpkg -i ./git-lfs_3.2.0_amd64.deb && \
    rm -f ./git-lfs_3.2.0_amd64.deb && \
    locale-gen zh_CN && \
    locale-gen zh_CN.utf8 && \
    update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    dpkg-reconfigure --frontend noninteractive tzdata && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
# install and config python
ARG PYTHON_VERSION=3.7.13
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
    /bin/bash miniconda.sh -b -p /opt/conda && \
    rm -f miniconda.sh && \
    ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
    echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
    cp /tmp/resources/conda.tuna ~/.condarc && \
    source /root/.bashrc && \
    conda install --yes python==${PYTHON_VERSION} && \
    pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
    pip config set install.trusted-host pypi.tuna.tsinghua.edu.cn
ARG USE_GPU=True
# install pytorch
ARG TORCH_VERSION=1.12.0
ARG CUDATOOLKIT_VERSION=11.3
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113; \
    else \
        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
    fi
# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
    else \
        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
    fi
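# install mmcv-full (the GPU branch builds the CUDA ops via FORCE_CUDA=1)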
RUN if [ "$USE_GPU" = "True" ] ; then \
        CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir mmcv-full && pip cache purge; \
    else \
        MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir mmcv-full && pip cache purge; \
    fi
# install modelscope
COPY requirements /var/modelscope
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/cv.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/multi-modal.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/nlp.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip cache purge
# default shell bash
ENV SHELL=/bin/bash
# install special packages (version specifiers are quoted so bash does not treat ">=" as a redirection)
RUN pip install --no-cache-dir "mmcls>=0.21.0" "mmdet>=2.25.0" "decord>=0.6.0" datasets==2.1.0 numpy==1.18.5 ipykernel fairseq
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir dgl-cu113 dglgo -f https://data.dgl.ai/wheels/repo.html; \
    else \
        pip install --no-cache-dir dgl dglgo -f https://data.dgl.ai/wheels/repo.html; \
    fi
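
The version choices above are all exposed as build arguments (BASE_IMAGE, PYTHON_VERSION, USE_GPU, TORCH_VERSION, CUDATOOLKIT_VERSION, TENSORFLOW_VERSION), so they can be overridden with --build-arg. As a minimal sketch, assuming the build runs from the repository root (so docker/rcfiles and requirements/ are in the build context) and using placeholder image tags:

# GPU build with the defaults baked into the Dockerfile
docker build -f Dockerfile.ubuntu -t modelscope:ubuntu20.04-gpu .

# CPU-only build; USE_GPU=False selects the CPU wheels for torch, mmcv-full and dgl
docker build -f Dockerfile.ubuntu --build-arg USE_GPU=False -t modelscope:ubuntu20.04-cpu .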