mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-03 04:02:49 +08:00)

add flash-attn installation flag in Dockerfile

This commit introduces an INSTALL_FLASH_ATTN build argument so that the flash-attn rebuild step in the CUDA Dockerfile can be toggled at build time; the flag is added alongside the existing INSTALL_* arguments in the documented docker build command, the Dockerfile, and docker-compose.yml.

Former-commit-id: e19491b0f0446f2fb2154cf14e0b2fbba5b54808
parent 08a221443c
commit c662c2e56f
@@ -444,6 +444,7 @@ docker build -f ./docker/docker-cuda/Dockerfile \
     --build-arg INSTALL_BNB=false \
     --build-arg INSTALL_VLLM=false \
     --build-arg INSTALL_DEEPSPEED=false \
+    --build-arg INSTALL_FLASH_ATTN=false \
     --build-arg PIP_INDEX=https://pypi.org/simple \
     -t llamafactory:latest .
@@ -444,6 +444,7 @@ docker build -f ./docker/docker-cuda/Dockerfile \
     --build-arg INSTALL_BNB=false \
     --build-arg INSTALL_VLLM=false \
     --build-arg INSTALL_DEEPSPEED=false \
+    --build-arg INSTALL_FLASH_ATTN=false \
     --build-arg PIP_INDEX=https://pypi.org/simple \
     -t llamafactory:latest .
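As a usage sketch (not part of the commit), the documented build command can simply flip the new argument to true to compile flash-attn into the image; the Dockerfile path and image tag are taken from the excerpt above:

    docker build -f ./docker/docker-cuda/Dockerfile \
        --build-arg INSTALL_BNB=false \
        --build-arg INSTALL_VLLM=false \
        --build-arg INSTALL_DEEPSPEED=false \
        --build-arg INSTALL_FLASH_ATTN=true \
        --build-arg PIP_INDEX=https://pypi.org/simple \
        -t llamafactory:latest .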
@@ -6,6 +6,7 @@ FROM nvcr.io/nvidia/pytorch:24.02-py3
 ARG INSTALL_BNB=false
 ARG INSTALL_VLLM=false
 ARG INSTALL_DEEPSPEED=false
+ARG INSTALL_FLASH_ATTN=false
 ARG PIP_INDEX=https://pypi.org/simple
 
 # Set the working directory
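Because the new ARG defaults to false, builds that pass no flag keep the previous behavior. A minimal sketch (not from the commit) of a build that overrides only the new argument and leaves the others at their declared defaults:

    # Only override the new flag; the other INSTALL_* args keep their defaults
    docker build -f ./docker/docker-cuda/Dockerfile \
        --build-arg INSTALL_FLASH_ATTN=true \
        -t llamafactory:latest .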
@@ -36,9 +37,11 @@ RUN EXTRA_PACKAGES="metrics"; \
     pip uninstall -y transformer-engine flash-attn
 
 # Rebuild flash-attn
-RUN ninja --version || \
-    (pip uninstall -y ninja && pip install ninja) && \
-    MAX_JOBS=4 pip install --no-cache-dir flash-attn --no-build-isolation
+RUN if [ "$INSTALL_FLASH_ATTN" = "true" ]; then \
+        ninja --version || \
+        (pip uninstall -y ninja && pip install ninja) && \
+        MAX_JOBS=4 pip install --no-cache-dir flash-attn --no-build-isolation; \
+    fi
 
 # Set up volumes
 VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
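Note that the earlier pip uninstall still removes the base image's flash-attn, so with the flag left at false the package is absent from the final image. To confirm whether the conditional rebuild ran, a quick smoke test (a sketch, not part of the commit; it assumes the llamafactory:latest tag from the docs and that the flash_attn package exposes __version__):

    # Import succeeds only if flash-attn was rebuilt into the image
    docker run --rm llamafactory:latest \
        python -c "import flash_attn; print(flash_attn.__version__)"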
@@ -7,6 +7,7 @@ services:
         INSTALL_BNB: false
         INSTALL_VLLM: false
         INSTALL_DEEPSPEED: false
+        INSTALL_FLASH_ATTN: false
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
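For compose users, INSTALL_FLASH_ATTN: false is a literal build arg rather than an environment lookup, so opting in means editing docker-compose.yml before rebuilding. A minimal sketch:

    # After changing the line to "INSTALL_FLASH_ATTN: true" in docker-compose.yml
    docker compose up -d --build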