# Start from the official PyTorch image (ubuntu-22.04 + cuda-12.4.1 + python-3.11)
# https://hub.docker.com/r/pytorch/pytorch/tags
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-runtime
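
# Note: the prebuilt wheels installed further down are pinned to exactly this
# stack (torch 2.6, CUDA 12.4, CPython 3.11); changing the tag above means
# picking matching wheels below as well.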

# Define environment variables
# Cap parallel compile jobs in case pip ever builds a native extension from source
ENV MAX_JOBS=16
# vLLM workers must use "spawn": a forked process cannot re-initialize CUDA
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
# Suppress interactive prompts from apt/dpkg during the build
ENV DEBIAN_FRONTEND=noninteractive
# Clear any Node.js options inherited from the build environment
ENV NODE_OPTIONS=""
# Silence pip's warning about running as the root user
ENV PIP_ROOT_USER_ACTION=ignore

# Define installation arguments
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
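
# Both mirrors can be overridden at build time; a sketch (the values here are
# illustrative upstream defaults, not part of this file):
#   docker build \
#     --build-arg APT_SOURCE=http://archive.ubuntu.com/ubuntu/ \
#     --build-arg PIP_INDEX=https://pypi.org/simple \
#     .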

# Set apt source
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
        echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list
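
# "jammy" is Ubuntu 22.04, matching the base image; the stock list survives as
# sources.list.bak in case the default mirrors need to be restored.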

# Install systemd (provides systemctl) and wget
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd wget && \
    apt-get clean

# Install git and vim
RUN apt-get update && \
    apt-get install -y git vim && \
    apt-get clean

# Install gcc and g++ (needed if pip falls back to compiling a package from source)
RUN apt-get update && \
    apt-get install -y gcc g++ && \
    apt-get clean
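
# A possible slimming tweak (not applied upstream): the three apt-get layers
# above could be merged into a single RUN, with "rm -rf /var/lib/apt/lists/*"
# after "apt-get clean" to also drop the cached package indexes.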

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    python -m pip install --upgrade pip
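
# Quick sanity check (a sketch, run in a later RUN step or a container):
# "pip config list" should now show both keys pointing at ${PIP_INDEX}.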

# Install flash-attn-2.7.4.post1 (cxx11abi=False)
# The downloaded wheel is deleted afterwards so it does not stay in the layer
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl && \
    rm -f flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
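
# The filename encodes the build matrix: cu12 (CUDA 12), torch2.6,
# cxx11abiFALSE, and cp311 (CPython 3.11); each tag must match the base image.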

# Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False)
RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
    pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
    rm -f flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl
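
# Unlike flash-attn, this wheel is tagged cp38-abi3 (stable ABI), so a single
# build covers CPython 3.8+ including the 3.11 shipped in this image.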

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url
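
# Build/run sketch (the image tag "llamafactory" is an assumed name, not
# defined anywhere in this file):
#   docker build -t llamafactory .
#   docker run --gpus all -it llamafactory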