Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-08-03 20:22:49 +08:00

This pull request increases the `shm_size` parameter in docker-compose.yml to 16 GB. The goal is to improve LLaMA-Factory's performance on large-model fine-tuning tasks by providing enough shared memory for efficient data loading and parallel processing. This PR also addresses the shared-memory-limit error discussed in [this comment](https://github.com/hiyouga/LLaMA-Factory/issues/4316#issuecomment-2466270708).

Former-commit-id: 64414905a3728abf3c51968177ffc42cfc653310
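
For context, the error in question typically originates in PyTorch's `DataLoader`: worker processes hand batches back to the parent process through `/dev/shm`, so multi-worker loading of large tensors can fail inside a container whose shared-memory mount is too small. A minimal illustrative sketch (not code from this repository; the tensor and batch sizes are arbitrary):

```python
# Illustrative only -- not code from this PR. PyTorch DataLoader workers
# pass batches to the parent process through shared memory (/dev/shm),
# so multi-worker loading of large tensors fails inside a container
# whose /dev/shm is too small; raising shm_size lifts that ceiling.
import torch
from torch.utils.data import DataLoader, TensorDataset

def main() -> None:
    dataset = TensorDataset(torch.randn(2048, 4096))  # ~32 MB of float32 data
    loader = DataLoader(dataset, batch_size=256, num_workers=4)
    for (batch,) in loader:  # each batch crosses /dev/shm
        _ = batch.mean()

if __name__ == "__main__":  # guard required for multiprocessing workers on spawn platforms
    main()
```
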
docker-compose.yml · 38 lines · 944 B · YAML
```yaml
services:
  llamafactory:
    build:
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      args:
        INSTALL_BNB: false
        INSTALL_VLLM: false
        INSTALL_DEEPSPEED: false
        INSTALL_FLASHATTN: false
        INSTALL_LIGER_KERNEL: false
        INSTALL_HQQ: false
        INSTALL_EETQ: false
        PIP_INDEX: https://pypi.org/simple
    container_name: llamafactory
    volumes:
      - ../../hf_cache:/root/.cache/huggingface
      - ../../ms_cache:/root/.cache/modelscope
      - ../../om_cache:/root/.cache/openmind
      - ../../data:/app/data
      - ../../output:/app/output
    ports:
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    shm_size: '16gb'
    stdin_open: true
    command: bash
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    restart: unless-stopped
```
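
To apply the change, rebuild and restart the service from the docker/docker-cuda directory with `docker compose up -d --build`; once the container is running, `docker exec -it llamafactory df -h /dev/shm` reports the shared memory actually available inside it. (Both are standard Docker Compose / Docker CLI invocations; the container name comes from the `container_name` field above.)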