services:
  llamafactory:
    build:
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      args:
        PIP_INDEX: https://pypi.org/simple
        EXTRAS: metrics
    container_name: llamafactory
    ports:
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    # shm_size: "16gb"  # ipc: host is set
    stdin_open: true
    command: bash
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [ gpu ]
    restart: unless-stopped
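
# Minimal usage sketch (an assumption: this file is saved as docker-compose.yml and the
# commands are run from its directory). The service/container name "llamafactory" comes
# from the definition above; these are standard Docker Compose commands:
#   docker compose up -d                    # build the image and start the container in the background
#   docker compose exec llamafactory bash   # open a shell inside the running container
#   docker compose down                     # stop and remove the container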