diff --git a/README.md b/README.md
index 4510b583..5516a1b7 100644
--- a/README.md
+++ b/README.md
@@ -55,7 +55,7 @@ Choose your path:
- **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
- **Local machine**: Please refer to [usage](#getting-started)
- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
-- **Alaya NeW (offer)**: https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory
+- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory
> [!NOTE]
> All websites other than the links above are unauthorized third-party sites. Please use them with caution.
@@ -680,11 +680,6 @@ docker build -f ./docker/docker-cuda/Dockerfile \
-t llamafactory:latest .
docker run -dit --ipc=host --gpus=all \
- -v ./hf_cache:/root/.cache/huggingface \
- -v ./ms_cache:/root/.cache/modelscope \
- -v ./om_cache:/root/.cache/openmind \
- -v ./shared_data:/app/shared_data \
- -v ./output:/app/output \
-p 7860:7860 \
-p 8000:8000 \
--name llamafactory \
@@ -702,11 +697,6 @@ docker build -f ./docker/docker-npu/Dockerfile \
-t llamafactory:latest .
docker run -dit --ipc=host \
- -v ./hf_cache:/root/.cache/huggingface \
- -v ./ms_cache:/root/.cache/modelscope \
- -v ./om_cache:/root/.cache/openmind \
- -v ./shared_data:/app/shared_data \
- -v ./output:/app/output \
-v /usr/local/dcmi:/usr/local/dcmi \
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
@@ -732,11 +722,6 @@ docker build -f ./docker/docker-rocm/Dockerfile \
-t llamafactory:latest .
docker run -dit --ipc=host \
- -v ./hf_cache:/root/.cache/huggingface \
- -v ./ms_cache:/root/.cache/modelscope \
- -v ./om_cache:/root/.cache/openmind \
- -v ./shared_data:/app/shared_data \
- -v ./output:/app/output \
-p 7860:7860 \
-p 8000:8000 \
--device /dev/kfd \
@@ -749,12 +734,14 @@ docker exec -it llamafactory bash
-Details about volume
+Use Docker volumes
-- `hf_cache`: Utilize Hugging Face cache on the host machine. Reassignable if a cache already exists in a different directory.
-- `ms_cache`: Similar to Hugging Face cache but for ModelScope users.
-- `om_cache`: Similar to Hugging Face cache but for Modelers users.
-- `shared_data`: Place datasets on this dir of the host machine so that they can be selected on LLaMA Board GUI.
+To use data volumes, uncomment `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` in the Dockerfile.
+
+When starting the container with `docker run`, use the `-v ./hf_cache:/root/.cache/huggingface` argument to mount a host directory into the container. The following data volumes are available; an example follows the list.
+
+- `hf_cache`: Reuse the Hugging Face cache on the host machine.
+- `shared_data`: The directory on the host machine for storing datasets.
- `output`: Set the export directory to this location so that the merged model can be accessed directly on the host machine.
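+
+For example, to persist the Hugging Face cache, datasets, and exported models on the host, restore the corresponding mounts in the `docker run` command (a sketch reusing the paths above; adjust the host paths to your setup):
+
+```bash
+docker run -dit --ipc=host --gpus=all \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./shared_data:/app/shared_data \
+    -v ./output:/app/output \
+    -p 7860:7860 \
+    -p 8000:8000 \
+    --name llamafactory \
+    llamafactory:latest
+```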
diff --git a/README_zh.md b/README_zh.md
index 6f5bab79..abd2f503 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -57,7 +57,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
- **Colab (free)**: https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
- **Local machine**: Please refer to [usage](#如何使用)
- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
-- **Alaya NeW (offer)**: https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory
+- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory
> [!NOTE]
> All websites other than the links above are unauthorized third-party sites. Please use them with caution.
@@ -682,11 +682,6 @@ docker build -f ./docker/docker-cuda/Dockerfile \
-t llamafactory:latest .
docker run -dit --ipc=host --gpus=all \
- -v ./hf_cache:/root/.cache/huggingface \
- -v ./ms_cache:/root/.cache/modelscope \
- -v ./om_cache:/root/.cache/openmind \
- -v ./shared_data:/app/shared_data \
- -v ./output:/app/output \
-p 7860:7860 \
-p 8000:8000 \
--name llamafactory \
@@ -704,11 +699,6 @@ docker build -f ./docker/docker-npu/Dockerfile \
-t llamafactory:latest .
docker run -dit --ipc=host \
- -v ./hf_cache:/root/.cache/huggingface \
- -v ./ms_cache:/root/.cache/modelscope \
- -v ./om_cache:/root/.cache/openmind \
- -v ./shared_data:/app/shared_data \
- -v ./output:/app/output \
-v /usr/local/dcmi:/usr/local/dcmi \
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
@@ -734,11 +724,6 @@ docker build -f ./docker/docker-rocm/Dockerfile \
-t llamafactory:latest .
docker run -dit --ipc=host \
- -v ./hf_cache:/root/.cache/huggingface \
- -v ./ms_cache:/root/.cache/modelscope \
- -v ./om_cache:/root/.cache/openmind \
- -v ./shared_data:/app/shared_data \
- -v ./output:/app/output \
-p 7860:7860 \
-p 8000:8000 \
--device /dev/kfd \
@@ -751,11 +736,13 @@ docker exec -it llamafactory bash
-Volume details
+Use Docker volumes
-- `hf_cache`: Use the host machine's Hugging Face cache; it can be redirected to a different directory.
-- `ms_cache`: Similar to the Hugging Face cache but for ModelScope users.
-- `om_cache`: Similar to the Hugging Face cache but for Modelers users.
+To use data volumes, uncomment `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` in the Dockerfile.
+
+When starting the container with `docker run`, use the `-v ./hf_cache:/root/.cache/huggingface` argument to mount a host directory into the container. The available data volumes are described below; a Docker Compose example follows the list.
+
+- `hf_cache`: Reuse the Hugging Face cache on the host machine.
- `shared_data`: The directory on the host machine for storing datasets.
- `output`: Set the export directory to this location so that the exported model can be accessed directly on the host machine.
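+
+If you prefer Docker Compose, the same mounts can be restored under the service definition (a sketch mirroring the former compose configuration; host paths are relative to the compose file and can be changed):
+
+```yaml
+services:
+  llamafactory:
+    volumes:
+      - ../../hf_cache:/root/.cache/huggingface
+      - ../../shared_data:/app/shared_data
+      - ../../output:/app/output
+```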
diff --git a/docker/docker-cuda/Dockerfile b/docker/docker-cuda/Dockerfile
index 7aaa0d0a..b2798e07 100644
--- a/docker/docker-cuda/Dockerfile
+++ b/docker/docker-cuda/Dockerfile
@@ -47,7 +47,7 @@ RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
fi
# Set up volumes
-VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/shared_data", "/app/output" ]
+# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
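+# Uncommenting the line above makes Docker create anonymous volumes for these paths; alternatively, bind-mount host directories via `docker run -v`.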
# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
diff --git a/docker/docker-cuda/docker-compose.yml b/docker/docker-cuda/docker-compose.yml
index 1232f3e4..ab0da4d8 100644
--- a/docker/docker-cuda/docker-compose.yml
+++ b/docker/docker-cuda/docker-compose.yml
@@ -7,12 +7,6 @@ services:
PIP_INDEX: https://pypi.org/simple
EXTRAS: metrics
container_name: llamafactory
- volumes:
- - ../../hf_cache:/root/.cache/huggingface
- - ../../ms_cache:/root/.cache/modelscope
- - ../../om_cache:/root/.cache/openmind
- - ../../shared_data:/app/shared_data
- - ../../output:/app/output
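+# To persist caches and outputs, optionally restore the host mounts, e.g.:
+# volumes:
+#   - ../../hf_cache:/root/.cache/huggingface
+#   - ../../shared_data:/app/shared_data
+#   - ../../output:/app/output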
ports:
- "7860:7860"
- "8000:8000"
diff --git a/docker/docker-npu/Dockerfile b/docker/docker-npu/Dockerfile
index e00f34a5..3053b689 100644
--- a/docker/docker-npu/Dockerfile
+++ b/docker/docker-npu/Dockerfile
@@ -39,7 +39,7 @@ COPY . /app
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
# Set up volumes
-VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/shared_data", "/app/output" ]
+# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
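+# Uncommenting the line above makes Docker create anonymous volumes for these paths; alternatively, bind-mount host directories via `docker run -v`.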
# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
diff --git a/docker/docker-npu/docker-compose.yml b/docker/docker-npu/docker-compose.yml
index 0b45d3a5..659f8d1b 100644
--- a/docker/docker-npu/docker-compose.yml
+++ b/docker/docker-npu/docker-compose.yml
@@ -8,11 +8,6 @@ services:
EXTRAS: torch-npu,metrics
container_name: llamafactory
volumes:
- - ../../hf_cache:/root/.cache/huggingface
- - ../../ms_cache:/root/.cache/modelscope
- - ../../om_cache:/root/.cache/openmind
- - ../../shared_data:/app/shared_data
- - ../../output:/app/output
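+# Optionally restore host mounts for caches and outputs here, e.g.:
+#   - ../../hf_cache:/root/.cache/huggingface
+#   - ../../shared_data:/app/shared_data
+#   - ../../output:/app/output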
- /usr/local/dcmi:/usr/local/dcmi
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
- /usr/local/Ascend/driver:/usr/local/Ascend/driver
diff --git a/docker/docker-rocm/Dockerfile b/docker/docker-rocm/Dockerfile
index 58162f3d..424037db 100644
--- a/docker/docker-rocm/Dockerfile
+++ b/docker/docker-rocm/Dockerfile
@@ -52,7 +52,7 @@ RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
fi
# Set up volumes
-VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/shared_data", "/app/output" ]
+# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
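+# Uncommenting the line above makes Docker create anonymous volumes for these paths; alternatively, bind-mount host directories via `docker run -v`.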
# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
diff --git a/docker/docker-rocm/docker-compose.yml b/docker/docker-rocm/docker-compose.yml
index 33567cc4..32cdf563 100644
--- a/docker/docker-rocm/docker-compose.yml
+++ b/docker/docker-rocm/docker-compose.yml
@@ -7,12 +7,6 @@ services:
PIP_INDEX: https://pypi.org/simple
EXTRAS: metrics
container_name: llamafactory
- volumes:
- - ../../hf_cache:/root/.cache/huggingface
- - ../../ms_cache:/root/.cache/modelscope
- - ../../om_cache:/root/.cache/openmind
- - ../../shared_data:/app/shared_data
- - ../../output:/app/output
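+# To persist caches and outputs, optionally restore the host mounts, e.g.:
+# volumes:
+#   - ../../hf_cache:/root/.cache/huggingface
+#   - ../../shared_data:/app/shared_data
+#   - ../../output:/app/output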
ports:
- "7860:7860"
- "8000:8000"