From 4d3ffa2ec4e386c85fc4cfc643893fe45cd3e1c8 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Tue, 27 May 2025 19:01:31 +0800
Subject: [PATCH] [assets] fix docker image (#8180)

---
 docker/docker-cuda/Dockerfile  | 14 +++++++-------
 docker/docker-npu/Dockerfile   | 13 ++++++-------
 docker/docker-rocm/Dockerfile  | 14 +++++++-------
 src/llamafactory/extras/env.py |  6 ++++++
 4 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/docker/docker-cuda/Dockerfile b/docker/docker-cuda/Dockerfile
index f0e2c5f0..a35122b0 100644
--- a/docker/docker-cuda/Dockerfile
+++ b/docker/docker-cuda/Dockerfile
@@ -1,13 +1,13 @@
-# Installation arguments
+# https://hub.docker.com/r/hiyouga/pytorch/tags
 ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0
+FROM ${BASE_IMAGE}
+
+# Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
 ARG EXTRAS=metrics
 ARG INSTALL_FLASHATTN=false
 ARG HTTP_PROXY=""
 
-# https://hub.docker.com/r/hiyouga/pytorch/tags
-FROM "${BASE_IMAGE}"
-
 # Define environments
 ENV MAX_JOBS=16
 ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
@@ -37,7 +37,7 @@ COPY . /app
 RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
 
 # Rebuild flash attention
-RUN if [ "$INSTALL_FLASHATTN" == "true" ]; then \
+RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
     pip uninstall -y ninja && \
     pip install --no-cache-dir ninja && \
     pip install --no-cache-dir flash-attn --no-build-isolation; \
@@ -47,11 +47,11 @@ RUN if [ "$INSTALL_FLASHATTN" == "true" ]; then \
 VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/data", "/app/output" ]
 
 # Expose port 7860 for LLaMA Board
-ENV GRADIO_SERVER_PORT 7860
+ENV GRADIO_SERVER_PORT=7860
 EXPOSE 7860
 
 # Expose port 8000 for API service
-ENV API_PORT 8000
+ENV API_PORT=8000
 EXPOSE 8000
 
 # unset proxy
diff --git a/docker/docker-npu/Dockerfile b/docker/docker-npu/Dockerfile
index 9b507737..4a0ff6a5 100644
--- a/docker/docker-npu/Dockerfile
+++ b/docker/docker-npu/Dockerfile
@@ -1,13 +1,12 @@
-# Installation arguments
+# https://hub.docker.com/r/ascendai/cann/tags
 ARG BASE_IMAGE=ascendai/cann:8.0.0-910b-ubuntu22.04-py3.11
+FROM ${BASE_IMAGE}
+
+# Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
 ARG EXTRAS=metrics
-ARG INSTALL_FLASHATTN=false
 ARG HTTP_PROXY=""
 
-# https://hub.docker.com/r/ascendai/cann/tags
-FROM "${BASE_IMAGE}"
-
 # Define environments
 ENV MAX_JOBS=16
 ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
@@ -40,11 +39,11 @@ RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
 VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/data", "/app/output" ]
 
 # Expose port 7860 for LLaMA Board
-ENV GRADIO_SERVER_PORT 7860
+ENV GRADIO_SERVER_PORT=7860
 EXPOSE 7860
 
 # Expose port 8000 for API service
-ENV API_PORT 8000
+ENV API_PORT=8000
 EXPOSE 8000
 
 # unset proxy
diff --git a/docker/docker-rocm/Dockerfile b/docker/docker-rocm/Dockerfile
index a552abfb..58abb9a7 100644
--- a/docker/docker-rocm/Dockerfile
+++ b/docker/docker-rocm/Dockerfile
@@ -1,14 +1,14 @@
-# Installation arguments
+# https://hub.docker.com/r/rocm/pytorch/tags
 ARG BASE_IMAGE=rocm/pytorch:rocm6.4.1_ubuntu22.04_py3.10_pytorch_release_2.6.0
+FROM ${BASE_IMAGE}
+
+# Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
 ARG EXTRAS=metrics
 ARG INSTALL_FLASHATTN=false
 ARG HTTP_PROXY=""
 ARG PYTORCH_INDEX=https://download.pytorch.org/whl/rocm6.3
 
-# https://hub.docker.com/r/rocm/pytorch/tags
-FROM "${BASE_IMAGE}"
-
 # Define environments
 ENV MAX_JOBS=16
 ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
@@ -45,7 +45,7 @@ COPY . /app
 RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
 
 # Rebuild flash attention
-RUN if [ "$INSTALL_FLASHATTN" == "true" ]; then \
+RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
    pip uninstall -y ninja && \
    pip install --no-cache-dir ninja && \
    pip install --no-cache-dir flash-attn --no-build-isolation; \
@@ -55,11 +55,11 @@ RUN if [ "$INSTALL_FLASHATTN" == "true" ]; then \
 VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/data", "/app/output" ]
 
 # Expose port 7860 for LLaMA Board
-ENV GRADIO_SERVER_PORT 7860
+ENV GRADIO_SERVER_PORT=7860
 EXPOSE 7860
 
 # Expose port 8000 for API service
-ENV API_PORT 8000
+ENV API_PORT=8000
 EXPOSE 8000
 
 # unset proxy
diff --git a/src/llamafactory/extras/env.py b/src/llamafactory/extras/env.py
index ab0dfb8f..c4872ea4 100644
--- a/src/llamafactory/extras/env.py
+++ b/src/llamafactory/extras/env.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 import platform
 
 import accelerate
@@ -83,4 +84,9 @@ def print_env() -> None:
     except Exception:
         pass
 
+    if os.path.exists("data"):
+        info["Default data directory"] = "detected"
+    else:
+        info["Default data directory"] = "not detected"
+
     print("\n" + "\n".join([f"- {key}: {value}" for key, value in info.items()]) + "\n")
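
Note on the ARG reordering: Docker scopes an ARG declared before FROM to the FROM instruction alone, so PIP_INDEX, EXTRAS, INSTALL_FLASHATTN, and HTTP_PROXY were out of scope (effectively empty) in the RUN steps that referenced them. Declaring them after FROM places them inside the build stage; only BASE_IMAGE, which FROM itself consumes, stays above it. The ENV changes are a related cleanup: the space-separated "ENV key value" form is legacy syntax that newer Docker versions warn about, so the ports now use "ENV key=value". A minimal sketch of passing these arguments at build time (the image tag llamafactory:cuda is an illustrative name, not part of the patch):

    # Build the CUDA image, overriding the install-time arguments.
    # These --build-arg names match the ARGs declared after FROM above.
    docker build \
        --build-arg PIP_INDEX=https://pypi.org/simple \
        --build-arg EXTRAS=metrics \
        --build-arg INSTALL_FLASHATTN=true \
        -f docker/docker-cuda/Dockerfile \
        -t llamafactory:cuda .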
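
Note on the env.py addition: print_env() now checks os.path.exists("data"), a path relative to the current working directory, so the report distinguishes running from the repository root (where the default dataset directory lives, mounted as /app/data in the images above) from running elsewhere. A sketch of the expected behavior, assuming the report is generated via llamafactory-cli env, LLaMA-Factory's env subcommand that wraps print_env():

    # Hypothetical session: run from the repository root inside the container.
    cd /app && llamafactory-cli env
    # The report now includes one of the following lines:
    # - Default data directory: detected
    # - Default data directory: not detected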