Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-11-04 18:02:19 +08:00)
update docker files

1. add docker-npu (Dockerfile and docker-compose.yml)
2. move cuda docker to docker-cuda, with tiny changes to adapt to the new path

Former-commit-id: 5431c1f18aadb072208efe7fd8e36fdcfbf807c2
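The resulting layout, as described by the commit message (a sketch inferred from the diffs below, not an authoritative listing of the tree):

    docker/
    ├── docker-cuda/            # moved from the repository root
    │   ├── Dockerfile
    │   └── docker-compose.yml
    └── docker-npu/             # new in this commit
        ├── Dockerfile
        └── docker-compose.yml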
parent 826d7808b4
commit af2607de1a

README.md (61 lines changed)
@@ -383,10 +383,11 @@ source /usr/local/Ascend/ascend-toolkit/set_env.sh
 | torch-npu    | 2.1.0   | 2.1.0.post3 |
 | deepspeed    | 0.13.2  | 0.13.2      |
 
-Docker image:
+Docker users please refer to [Build Docker](#Build-Docker).
 
-- 32GB: [Download page](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
-- 64GB: [Download page](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
+**NOTE**
+
+The default docker image is [cosdt/cann:8.0.rc1-910b-ubuntu22.04](https://hub.docker.com/layers/cosdt/cann/8.0.rc1-910b-ubuntu22.04/images/sha256-29ef8aacf6b2babd292f06f00b9190c212e7c79a947411e213135e4d41a178a9?context=explore). More options can be found at [cosdt/cann](https://hub.docker.com/r/cosdt/cann/tags).
 
 Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use.
 
@@ -426,7 +427,10 @@ llamafactory-cli webui
 
 #### Use Docker
 
+<details><summary>For NVIDIA GPU users:</summary>
+
 ```bash
+cd ./docker/docker-cuda
 docker build -f ./Dockerfile \
     --build-arg INSTALL_BNB=false \
     --build-arg INSTALL_VLLM=false \
@@ -435,18 +439,63 @@ docker build -f ./Dockerfile \
     -t llamafactory:latest .
 
 docker run -it --gpus=all \
-    -v ./hf_cache:/root/.cache/huggingface/ \
-    -v ./data:/app/data \
-    -v ./output:/app/output \
+    -v /$(dirname $(dirname "$PWD"))/hf_cache:/root/.cache/huggingface/ \
+    -v /$(dirname $(dirname "$PWD"))/data:/app/data \
+    -v /$(dirname $(dirname "$PWD"))/output:/app/output \
     -p 7860:7860 \
     -p 8000:8000 \
     --shm-size 16G \
     --name llamafactory \
     llamafactory:latest
 ```
+</details>
+
+<details><summary>For Ascend NPU users:</summary>
+
+```bash
+cd ./docker/docker-npu
+docker build -f ./Dockerfile \
+    --build-arg INSTALL_DEEPSPEED=false \
+    --build-arg PIP_INDEX=https://pypi.org/simple \
+    -t llamafactory:latest .
+
+# add --device for multi-npu usage
+# or modify --device to change npu card
+docker run -it \
+    -v /$(dirname $(dirname "$PWD"))/hf_cache:/root/.cache/huggingface/ \
+    -v /$(dirname $(dirname "$PWD"))/data:/app/data \
+    -v /$(dirname $(dirname "$PWD"))/output:/app/output \
+    -v /usr/local/dcmi:/usr/local/dcmi \
+    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64 \
+    -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
+    -v /etc/ascend_install.info:/etc/ascend_install.info \
+    -p 7860:7860 \
+    -p 8000:8000 \
+    --device /dev/davinci0 \
+    --device /dev/davinci_manager \
+    --device /dev/devmm_svm \
+    --device /dev/hisi_hdc \
+    --shm-size 16G \
+    --name llamafactory \
+    llamafactory:latest
+```
+</details>
 
 #### Use Docker Compose
 
+Firstly enter your docker path:
+
+```bash
+# for NVIDIA GPU users
+cd ./docker/docker-cuda
+
+# for Ascend NPU users
+cd ./docker/docker-npu
+```
+
+Then run the following command to build docker image and start the container:
+
 ```bash
 docker-compose up -d
 docker-compose exec llamafactory bash
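A note on the new mount paths above: the commands are now issued from `docker/docker-cuda` (or `docker/docker-npu`), and `$(dirname $(dirname "$PWD"))` walks two directories up from there, so the `hf_cache`, `data`, and `output` mounts still resolve to the repository root. A quick sanity check (the checkout path is hypothetical):

    cd /path/to/LLaMA-Factory/docker/docker-cuda   # hypothetical checkout location
    echo "$(dirname $(dirname "$PWD"))"            # prints /path/to/LLaMA-Factory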
README_zh.md (58 lines changed)
@@ -383,10 +383,11 @@ source /usr/local/Ascend/ascend-toolkit/set_env.sh
 | torch-npu    | 2.1.0   | 2.1.0.post3 |
 | deepspeed    | 0.13.2  | 0.13.2      |
 
-Docker 镜像:
+Docker用户请参考 [构建 Docker](#构建-Docker).
 
-- 32GB:[下载地址](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
-- 64GB:[下载地址](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
+**NOTE**
+
+默认镜像为 [cosdt/cann:8.0.rc1-910b-ubuntu22.04](https://hub.docker.com/layers/cosdt/cann/8.0.rc1-910b-ubuntu22.04/images/sha256-29ef8aacf6b2babd292f06f00b9190c212e7c79a947411e213135e4d41a178a9?context=explore). 更多选择见 [cosdt/cann](https://hub.docker.com/r/cosdt/cann/tags).
 
 请使用 `ASCEND_RT_VISIBLE_DEVICES` 而非 `CUDA_VISIBLE_DEVICES` 来指定运算设备。
 
@@ -426,7 +427,10 @@ llamafactory-cli webui
 
 #### 使用 Docker
 
+<details><summary>NVIDIA GPU 用户:</summary>
+
 ```bash
+cd ./docker/docker-cuda
 docker build -f ./Dockerfile \
     --build-arg INSTALL_BNB=false \
     --build-arg INSTALL_VLLM=false \
@@ -435,18 +439,60 @@ docker build -f ./Dockerfile \
     -t llamafactory:latest .
 
 docker run -it --gpus=all \
-    -v ./hf_cache:/root/.cache/huggingface/ \
-    -v ./data:/app/data \
-    -v ./output:/app/output \
+    -v /$(dirname $(dirname "$PWD"))/hf_cache:/root/.cache/huggingface/ \
+    -v /$(dirname $(dirname "$PWD"))/data:/app/data \
+    -v /$(dirname $(dirname "$PWD"))/output:/app/output \
     -p 7860:7860 \
     -p 8000:8000 \
     --shm-size 16G \
     --name llamafactory \
     llamafactory:latest
 ```
+</details>
+
+<details><summary>Ascend NPU 用户:</summary>
+
+```bash
+cd ./docker/docker-npu
+docker build -f ./Dockerfile \
+    --build-arg INSTALL_DEEPSPEED=false \
+    --build-arg PIP_INDEX=https://pypi.org/simple \
+    -t llamafactory:latest .
+
+# 增加 --device 来使用多卡 NPU 或修改第一个 --device 来更改 NPU 卡
+docker run -it \
+    -v /$(dirname $(dirname "$PWD"))/hf_cache:/root/.cache/huggingface/ \
+    -v /$(dirname $(dirname "$PWD"))/data:/app/data \
+    -v /$(dirname $(dirname "$PWD"))/output:/app/output \
+    -v /usr/local/dcmi:/usr/local/dcmi \
+    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64 \
+    -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
+    -v /etc/ascend_install.info:/etc/ascend_install.info \
+    -p 7860:7860 \
+    -p 8000:8000 \
+    --device /dev/davinci0 \
+    --device /dev/davinci_manager \
+    --device /dev/devmm_svm \
+    --device /dev/hisi_hdc \
+    --shm-size 16G \
+    --name llamafactory \
+    llamafactory:latest
+```
+</details>
 
 #### 使用 Docker Compose
 
+首先进入 docker 目录:
+```bash
+# NVIDIA GPU 用户
+cd ./docker/docker-cuda
+
+# Ascend NPU 用户
+cd ./docker/docker-npu
+```
+
+然后运行以下命令创建 docker 镜像并启动容器:
+
 ```bash
 docker-compose up -d
 docker-compose exec llamafactory bash
docker/docker-cuda/Dockerfile

@@ -9,16 +9,18 @@ ARG INSTALL_DEEPSPEED=false
 ARG PIP_INDEX=https://pypi.org/simple
 
 # Set the working directory
-WORKDIR /app
+WORKDIR /app/LLaMA-Factory
+
+RUN cd /app && \
+    git config --global http.version HTTP/1.1 && \
+    git clone https://github.com/hiyouga/LLaMA-Factory.git && \
+    cd /app/LLaMA-Factory
 
 # Install the requirements
-COPY requirements.txt /app/
 RUN pip config set global.index-url $PIP_INDEX
 RUN python -m pip install --upgrade pip
 RUN python -m pip install -r requirements.txt
 
-# Copy the rest of the application into the image
-COPY . /app/
 
 # Install the LLaMA Factory
 RUN EXTRA_PACKAGES="metrics"; \
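Since the build context is now `docker/docker-cuda`, the repository contents are no longer available to COPY; the image instead clones the sources at build time, and `requirements.txt` is installed from inside the clone. The `PIP_INDEX` build argument above can still be overridden when building; a hedged sketch (the mirror URL is illustrative only):

    cd docker/docker-cuda
    docker build -f ./Dockerfile \
        --build-arg PIP_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple \
        -t llamafactory:latest .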
docker/docker-cuda/docker-compose.yml

@@ -10,9 +10,9 @@ services:
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
-      - ./hf_cache:/root/.cache/huggingface/
-      - ./data:/app/data
-      - ./output:/app/output
+      - ../../hf_cache:/root/.cache/huggingface/
+      - ../../data:/app/LLaMA-Factory/data
+      - ../../output:/app/LLaMA-Factory/output
     ports:
       - "7860:7860"
       - "8000:8000"
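The `../../` prefixes do the same job for Docker Compose: relative host paths in a compose file are resolved against the directory containing the file, so `../../data` points back at the repository root while the container side now lives under `/app/LLaMA-Factory`. One way to verify the resolved mounts without starting anything (a sketch, using the `docker-compose` CLI already used in this commit):

    cd docker/docker-cuda
    docker-compose config   # prints the resolved config, volumes as absolute host paths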
docker/docker-npu/Dockerfile (new file, 40 lines)
@@ -0,0 +1,40 @@
+# Using ubuntu 22.04 images with cann 8.0.rc1
+# More options can be found at https://hub.docker.com/r/cosdt/cann/tags
+FROM cosdt/cann:8.0.rc1-910b-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Define installation arguments
+ARG INSTALL_DEEPSPEED=false
+ARG PIP_INDEX=https://pypi.org/simple
+
+# Set the working directory
+WORKDIR /app/LLaMA-Factory
+
+RUN cd /app && \
+    git config --global http.version HTTP/1.1 && \
+    git clone https://github.com/hiyouga/LLaMA-Factory.git && \
+    cd /app/LLaMA-Factory
+
+RUN pip config set global.index-url $PIP_INDEX
+RUN python3 -m pip install --upgrade pip
+
+# Install the LLaMA Factory
+RUN EXTRA_PACKAGES="torch-npu,metrics"; \
+    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
+        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
+    fi; \
+    pip install -e .[$EXTRA_PACKAGES] && \
+    pip uninstall -y transformer-engine flash-attn
+
+# Set up volumes
+VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
+
+# Expose port 7860 for the LLaMA Board
+EXPOSE 7860
+
+# Expose port 8000 for the API service
+EXPOSE 8000
+
+# Launch LLaMA Board
+CMD [ "llamafactory-cli", "webui" ]
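`INSTALL_DEEPSPEED` above toggles the optional `deepspeed` extra at build time. A hedged example of opting in (the image tag is arbitrary):

    cd docker/docker-npu
    docker build -f ./Dockerfile \
        --build-arg INSTALL_DEEPSPEED=true \
        -t llamafactory:npu .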
docker/docker-npu/docker-compose.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
+services:
+  llamafactory:
+    build:
+      dockerfile: Dockerfile
+      context: .
+      args:
+        INSTALL_DEEPSPEED: false
+        PIP_INDEX: https://pypi.org/simple
+    container_name: llamafactory
+    volumes:
+      - ../../hf_cache:/root/.cache/huggingface/
+      - ../../data:/app/LLaMA-Factory/data
+      - ../../output:/app/LLaMA-Factory/output
+      - /usr/local/dcmi:/usr/local/dcmi
+      - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
+      - /usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64
+      - /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info
+      - /etc/ascend_install.info:/etc/ascend_install.info
+    ports:
+      - "7860:7860"
+      - "8000:8000"
+    ipc: host
+    tty: true
+    stdin_open: true
+    command: bash
+    devices:
+      - /dev/davinci0
+      - /dev/davinci_manager
+      - /dev/devmm_svm
+      - /dev/hisi_hdc
+    restart: unless-stopped
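Like the CUDA variant, this compose file is meant to be run from its own directory. A sketch of bringing the container up and confirming that the mounted `npu-smi` binary works inside it:

    cd docker/docker-npu
    docker-compose up -d
    docker-compose exec llamafactory npu-smi info   # should list the davinci devices passed through above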