Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-08-02 19:52:50 +08:00.
1. add docker-npu (Dockerfile and docker-compose.yml) 2. move cuda docker to docker-cuda and tiny changes to adapt to the new path Former-commit-id: d7207e8ad10c7df6dcb1f5e59ff8eb06f9d77e67
41 lines
1.1 KiB
Docker
41 lines
1.1 KiB
Docker
# Ubuntu 22.04 base image with CANN 8.0.rc1 for Ascend 910B NPUs.
# More options can be found at https://hub.docker.com/r/cosdt/cann/tags
FROM cosdt/cann:8.0.rc1-910b-ubuntu22.04

# NOTE(review): this is a build-time-only setting; consider
# `ARG DEBIAN_FRONTEND=noninteractive` (or setting it inline on the apt
# RUN lines) so it does not leak into the runtime environment.
ENV DEBIAN_FRONTEND=noninteractive

# Define installation arguments
ARG INSTALL_DEEPSPEED=false
ARG PIP_INDEX=https://pypi.org/simple

# Clone the sources under /app (WORKDIR creates the directory if missing;
# avoids the `RUN cd …` anti-pattern, hadolint DL3003).
# HTTP/1.1 is forced because large clones over HTTP/2 can fail with some
# proxies/mirrors.
WORKDIR /app
RUN git config --global http.version HTTP/1.1 && \
    git clone https://github.com/hiyouga/LLaMA-Factory.git

# Set the working directory to the checkout for all following steps
WORKDIR /app/LLaMA-Factory

# Configure pip (index overridable via --build-arg PIP_INDEX=...) and
# upgrade it in the same layer; --no-cache-dir keeps the layer small (DL3042).
RUN pip config set global.index-url "$PIP_INDEX" && \
    python3 -m pip install --no-cache-dir --upgrade pip

# Install LLaMA Factory with the NPU extras, optionally adding DeepSpeed.
# transformer-engine and flash-attn are CUDA-only, so uninstall them in the
# same layer in case a dependency pulled them in; the extras spec is quoted
# because `[` and `]` are shell glob characters.
RUN EXTRA_PACKAGES="torch-npu,metrics"; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    pip install --no-cache-dir -e ".[$EXTRA_PACKAGES]" && \
    pip uninstall -y transformer-engine flash-attn

# Set up volumes for the HF cache, datasets, and training output
VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]

# Expose port 7860 for the LLaMA Board web UI
EXPOSE 7860

# Expose port 8000 for the API service
EXPOSE 8000

# Launch LLaMA Board (exec form so the CLI runs as PID 1 and receives signals)
CMD [ "llamafactory-cli", "webui" ]