LLaMA-Factory/docker/docker-cuda/docker-compose.yml
services:
  llamafactory:
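    # Build the CUDA image from the repo root (context) with the CUDA Dockerfile.
    # PIP_INDEX can be swapped for a regional mirror; EXTRAS presumably selects
    # the optional dependency groups installed with the package (here: metrics).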
    build:
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      args:
        PIP_INDEX: https://pypi.org/simple
        EXTRAS: metrics
    container_name: llamafactory
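    # Host-side cache and output directories, relative to this compose file;
    # they persist downloaded models and results across container rebuilds.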
    volumes:
      - ../../hf_cache:/root/.cache/huggingface
      - ../../ms_cache:/root/.cache/modelscope
      - ../../om_cache:/root/.cache/openmind
      - ../../shared_data:/app/shared_data
      - ../../output:/app/output
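    # Assumed defaults: 7860 for the Gradio web UI, 8000 for the API server.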
    ports:
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    # shm_size: "16gb" # ipc: host is set
    stdin_open: true
    command: bash
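    # Reserve all NVIDIA GPUs for this service; this requires the NVIDIA
    # Container Toolkit to be installed on the host.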
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    restart: unless-stopped