# syntax=docker/dockerfile:1
# Base: NVIDIA NGC PyTorch container (CUDA, cuDNN, NCCL preinstalled), pinned tag.
FROM nvcr.io/nvidia/pytorch:24.08-py3
# Runtime/build environment:
#   MAX_JOBS                     - parallel jobs for any from-source compiles (e.g. CUDA extensions).
#   VLLM_WORKER_MULTIPROC_METHOD - vLLM worker start method; fork is unsafe after CUDA init.
#   NODE_OPTIONS                 - cleared to override any base-image setting.
#   HF_HUB_ENABLE_HF_TRANSFER    - enable hf-transfer accelerated Hub downloads.
ENV MAX_JOBS=32 \
    VLLM_WORKER_MULTIPROC_METHOD=spawn \
    NODE_OPTIONS="" \
    HF_HUB_ENABLE_HF_TRANSFER="1"

# Build-time only: keep apt non-interactive during the build without baking
# DEBIAN_FRONTEND into the final image's runtime environment (Docker best practice).
ARG DEBIAN_FRONTEND=noninteractive
# Mirror endpoints (TUNA) for apt and pip; override via --build-arg for other regions.
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
# Pinned vLLM commit used to select the matching nightly wheel from wheels.vllm.ai.
ARG VLLM_COMMIT=227578480d71fc94ef46ca77fb69496412158d68
# Point apt at the mirror for all Ubuntu 22.04 (jammy) suites; keep a backup of
# the original sources.list so it can be restored if needed.
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
        echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list
# systemd (required by some cluster tooling). --force-confdef keeps existing
# config files on conflict. --no-install-recommends and clearing the apt lists
# in the same layer keep the image small (hadolint DL3015/DL3009).
RUN apt-get update && \
    apt-get install -y --no-install-recommends -o Dpkg::Options::="--force-confdef" systemd && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# tini: minimal init for PID 1 (zombie reaping, signal forwarding).
RUN apt-get update && \
    apt-get install -y --no-install-recommends tini && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Route pip through the mirror for the remainder of the build (reverted in the
# final step so the shipped image uses the default index).
# NOTE(review): extra-index-url duplicates index-url here; it is kept set so the
# final `pip config unset global.extra-index-url` finds the key and succeeds.
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    python -m pip install --no-cache-dir --upgrade pip
# Strip the NGC image's preinstalled torch stack and related builds so the
# pinned versions installed below are authoritative (avoids mixed-ABI wheels).
RUN pip uninstall -y \
        torch torchvision torchaudio \
        pytorch-quantization pytorch-triton torch-tensorrt \
        xgboost transformer_engine flash_attn apex megatron-core
# Install the vLLM nightly wheel matching the pinned commit, then overlay the
# patched sources from the hiyouga/vllm `verl_v1` branch on top of the installed
# package. Shallow-clone and delete the checkout in the SAME layer so the ~GB
# git history and working tree never persist in the image.
# NOTE(review): the dist-packages path hardcodes python3.10 — correct for this
# base image today, but must be revisited if the base image's Python changes.
RUN pip install --no-cache-dir vllm --pre --extra-index-url "https://wheels.vllm.ai/${VLLM_COMMIT}" && \
    git clone --depth 1 -b verl_v1 https://github.com/hiyouga/vllm.git && \
    cp -r vllm/vllm/ /usr/local/lib/python3.10/dist-packages/ && \
    rm -rf vllm
# Core training/serving stack. Version specifiers and extras MUST be quoted:
# an unquoted `pkg>=x.y` is parsed by the shell as an output redirection to a
# file named `=x.y`, silently dropping the version constraint, and unquoted
# `ray[default]` is subject to glob expansion.
RUN pip install --no-cache-dir \
        torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 tensordict torchdata \
        "transformers>=4.49.0" accelerate datasets peft hf-transfer \
        "ray[default]" codetiming hydra-core pandas "pyarrow>=15.0.0" pylatexenc \
        qwen-vl-utils wandb liger-kernel mathruler \
        pytest yapf py-spy pyext pre-commit ruff
# Prebuilt flash-attention wheel (cu12 / torch2.5 / cp310 / cxx11abi=FALSE) to
# avoid a long source build. pip installs directly from the URL, so there is no
# wget step and no leftover .whl file bloating the layer.
RUN pip install --no-cache-dir \
    "https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"
# Replace the deprecated pynvml shim with nvidia-ml-py and pin known-good
# versions of opencv-python-headless and fastapi. Specifiers are quoted —
# an unquoted `>=` is a shell redirection, not a version constraint.
RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --no-cache-dir "nvidia-ml-py>=12.560.30" opencv-python-headless==4.8.0.74 fastapi==0.115.6 && \
    pip install --no-cache-dir --upgrade "optree>=0.13.0"
# Revert pip to the default index so containers built from this image do not
# inherit the build-time mirror configuration.
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url