
Dockerfile.ubuntu 4.6 kB

ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
FROM $BASE_IMAGE
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
ENV CONDA_DIR /opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
# configure apt mirror, install base packages and git-lfs, set up locale and timezone
RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
    cp /tmp/resources/ubuntu20.04_sources.tuna /etc/apt/sources.list && \
    apt-get update && \
    apt-get install -y locales wget git vim ffmpeg libsm6 tzdata language-pack-zh-hans ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
    wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
    dpkg -i ./git-lfs_3.2.0_amd64.deb && \
    rm -f ./git-lfs_3.2.0_amd64.deb && \
    locale-gen zh_CN && \
    locale-gen zh_CN.utf8 && \
    update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    dpkg-reconfigure --frontend noninteractive tzdata && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
# install and configure python
ARG PYTHON_VERSION=3.7.13
RUN wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
    /bin/bash miniconda.sh -b -p /opt/conda && \
    rm -f miniconda.sh && \
    ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
    echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
    cp /tmp/resources/conda.tuna ~/.condarc && \
    source /root/.bashrc && \
    conda install --yes python==${PYTHON_VERSION} && \
    pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
    pip config set install.trusted-host mirrors.aliyun.com
ARG USE_GPU=True
# install pytorch
ARG TORCH_VERSION=1.12.0
ARG CUDATOOLKIT_VERSION=11.3
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113; \
    else \
        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
    fi
# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
    else \
        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
    fi
# install mmcv-full (compiled against CUDA when USE_GPU=True)
RUN if [ "$USE_GPU" = "True" ] ; then \
        CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir mmcv-full && pip cache purge; \
    else \
        MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir mmcv-full && pip cache purge; \
    fi
# install modelscope
COPY requirements /var/modelscope
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/cv.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/multi-modal.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/nlp.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip cache purge
# default shell bash
ENV SHELL=/bin/bash
# install special packages (version specifiers quoted so bash does not treat ">=" as a redirect)
RUN pip install --no-cache-dir "mmcls>=0.21.0" "mmdet>=2.25.0" "decord>=0.6.0" datasets==2.1.0 numpy==1.18.5 ipykernel fairseq fasttext https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/xtcocotools-1.12-cp37-cp37m-linux_x86_64.whl
# install dgl (CUDA 11.3 build when USE_GPU=True)
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir dgl-cu113 dglgo -f https://data.dgl.ai/wheels/repo.html; \
    else \
        pip install --no-cache-dir dgl dglgo -f https://data.dgl.ai/wheels/repo.html; \
    fi
# install jupyter plugins
RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
    cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/
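
A minimal build invocation might look like the sketch below. It assumes the build context is the ModelScope repository root, so the docker/rcfiles, docker/jupyter_plugins and requirements/ paths referenced by the COPY instructions resolve, and that the path passed to -f matches where this file is stored; the image tag is illustrative, and BASE_IMAGE can be overridden with another --build-arg if the default internal registry is not reachable.

# CPU-only build; omit USE_GPU=False to keep the GPU default
docker build \
    -f Dockerfile.ubuntu \
    --build-arg USE_GPU=False \
    --build-arg TORCH_VERSION=1.12.0 \
    --build-arg TENSORFLOW_VERSION=1.15.5 \
    -t modelscope:ubuntu20.04-py37 \
    .

For a GPU build, the resulting image would typically be started with docker run --gpus all, which requires the NVIDIA Container Toolkit on the host.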