mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-07-31 10:42:50 +08:00)

[assets] fix docker images (#8203)

commit a4048b7bb6 (parent 73b12baaaf)
@@ -3,12 +3,12 @@
 .github
 .venv
 cache
-data
 docker
 saves
 hf_cache
 ms_cache
 om_cache
+shared_data
 output
 .dockerignore
 .gitattributes
@@ -1,4 +1,4 @@
-name: push_docker
+name: docker

 on:
   workflow_dispatch:

@@ -32,7 +32,15 @@ jobs:
       url: https://hub.docker.com/r/hiyouga/llamafactory

     steps:
-      - name: Checkout repository
+      - name: Free up disk space
+        run: |
+          df -h
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf /opt/hostedtoolcache
+          df -h
+
+      - name: Checkout
         uses: actions/checkout@v4

       - name: Set up Docker Buildx
README.md (25 changed lines)
@@ -474,16 +474,25 @@ huggingface-cli login
 > [!IMPORTANT]
 > Installation is mandatory.

+#### Install from Source
+
 ```bash
 git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
 cd LLaMA-Factory
 pip install -e ".[torch,metrics]" --no-build-isolation
 ```

-Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, quality
+Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, dev

-> [!TIP]
-> Use `pip install -e . --no-deps --no-build-isolation` to resolve package conflicts.
+#### Install from Docker Image
+
+```bash
+docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
+```
+
+Find the pre-built images: https://hub.docker.com/r/hiyouga/llamafactory/tags
+
+Please refer to [build docker](#build-docker) to build the image yourself.

 <details><summary>Setting up a virtual environment with <b>uv</b></summary>

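A minimal usage sketch (not part of the commit) for the pre-built image added above, assuming the image ships the `llamafactory-cli` entrypoint and serves LLaMA Board on the Gradio port 7860 set in the Dockerfile later in this diff:

```bash
# Launch LLaMA Board from the pre-built image (flags mirror the README snippet above)
docker run -it --rm --gpus=all --ipc=host -p 7860:7860 \
    hiyouga/llamafactory:latest llamafactory-cli webui
# then open http://localhost:7860
```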
@@ -671,7 +680,7 @@ docker run -dit --ipc=host --gpus=all \
     -v ./hf_cache:/root/.cache/huggingface \
     -v ./ms_cache:/root/.cache/modelscope \
     -v ./om_cache:/root/.cache/openmind \
-    -v ./data:/app/data \
+    -v ./shared_data:/app/shared_data \
     -v ./output:/app/output \
     -p 7860:7860 \
     -p 8000:8000 \
@@ -686,14 +695,14 @@ For Ascend NPU users:
 ```bash
 docker build -f ./docker/docker-npu/Dockerfile \
     --build-arg PIP_INDEX=https://pypi.org/simple \
-    --build-arg EXTRAS=metrics \
+    --build-arg EXTRAS=torch-npu,metrics \
     -t llamafactory:latest .

 docker run -dit --ipc=host \
     -v ./hf_cache:/root/.cache/huggingface \
     -v ./ms_cache:/root/.cache/modelscope \
     -v ./om_cache:/root/.cache/openmind \
-    -v ./data:/app/data \
+    -v ./shared_data:/app/shared_data \
     -v ./output:/app/output \
     -v /usr/local/dcmi:/usr/local/dcmi \
     -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
@@ -723,7 +732,7 @@ docker run -dit --ipc=host \
     -v ./hf_cache:/root/.cache/huggingface \
     -v ./ms_cache:/root/.cache/modelscope \
     -v ./om_cache:/root/.cache/openmind \
-    -v ./data:/app/data \
+    -v ./shared_data:/app/shared_data \
     -v ./output:/app/output \
     -p 7860:7860 \
     -p 8000:8000 \
@@ -742,7 +751,7 @@ docker exec -it llamafactory bash
 - `hf_cache`: Utilize Hugging Face cache on the host machine. Reassignable if a cache already exists in a different directory.
 - `ms_cache`: Similar to Hugging Face cache but for ModelScope users.
 - `om_cache`: Similar to Hugging Face cache but for Modelers users.
-- `data`: Place datasets on this dir of the host machine so that they can be selected on LLaMA Board GUI.
+- `shared_data`: Place datasets on this dir of the host machine so that they can be selected on LLaMA Board GUI.
 - `output`: Set export dir to this location so that the merged result can be accessed directly on the host machine.

 </details>
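The renamed mount implies a matching layout on the host. A hedged sketch (directory names come from the commands above; the dataset file name is a placeholder):

```bash
# Create the host-side directories referenced by the docker run commands above
mkdir -p hf_cache ms_cache om_cache shared_data output
# Datasets placed in shared_data can then be selected in the LLaMA Board GUI
cp my_dataset.json shared_data/   # my_dataset.json is a hypothetical file
```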
README_zh.md (25 changed lines)
@@ -476,16 +476,25 @@ huggingface-cli login
 > [!IMPORTANT]
 > 此步骤为必需。

+#### 从源码安装
+
 ```bash
 git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
 cd LLaMA-Factory
 pip install -e ".[torch,metrics]" --no-build-isolation
 ```

-可选的额外依赖项:torch、torch-npu、metrics、deepspeed、liger-kernel、bitsandbytes、hqq、eetq、gptq、aqlm、vllm、sglang、galore、apollo、badam、adam-mini、qwen、minicpm_v、modelscope、openmind、swanlab、quality
+可选的额外依赖项:torch、torch-npu、metrics、deepspeed、liger-kernel、bitsandbytes、hqq、eetq、gptq、aqlm、vllm、sglang、galore、apollo、badam、adam-mini、qwen、minicpm_v、modelscope、openmind、swanlab、dev

-> [!TIP]
-> 遇到包冲突时,可使用 `pip install -e . --no-deps --no-build-isolation` 解决。
+#### 从镜像安装
+
+```bash
+docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
+```
+
+查看全部镜像:https://hub.docker.com/r/hiyouga/llamafactory/tags
+
+请参阅[构建 Docker](#构建-docker) 来重新构建镜像。

 <details><summary>使用 <b>uv</b> 构建虚拟环境</summary>

@@ -673,7 +682,7 @@ docker run -dit --ipc=host --gpus=all \
     -v ./hf_cache:/root/.cache/huggingface \
     -v ./ms_cache:/root/.cache/modelscope \
     -v ./om_cache:/root/.cache/openmind \
-    -v ./data:/app/data \
+    -v ./shared_data:/app/shared_data \
     -v ./output:/app/output \
     -p 7860:7860 \
     -p 8000:8000 \
@@ -688,14 +697,14 @@ docker exec -it llamafactory bash
 ```bash
 docker build -f ./docker/docker-npu/Dockerfile \
     --build-arg PIP_INDEX=https://pypi.org/simple \
-    --build-arg EXTRAS=metrics \
+    --build-arg EXTRAS=torch-npu,metrics \
     -t llamafactory:latest .

 docker run -dit --ipc=host \
     -v ./hf_cache:/root/.cache/huggingface \
     -v ./ms_cache:/root/.cache/modelscope \
     -v ./om_cache:/root/.cache/openmind \
-    -v ./data:/app/data \
+    -v ./shared_data:/app/shared_data \
     -v ./output:/app/output \
     -v /usr/local/dcmi:/usr/local/dcmi \
     -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
@@ -725,7 +734,7 @@ docker run -dit --ipc=host \
     -v ./hf_cache:/root/.cache/huggingface \
     -v ./ms_cache:/root/.cache/modelscope \
     -v ./om_cache:/root/.cache/openmind \
-    -v ./data:/app/data \
+    -v ./shared_data:/app/shared_data \
     -v ./output:/app/output \
     -p 7860:7860 \
     -p 8000:8000 \
@@ -744,7 +753,7 @@ docker exec -it llamafactory bash
 - `hf_cache`:使用宿主机的 Hugging Face 缓存文件夹,允许更改为新的目录。
 - `ms_cache`:类似 Hugging Face 缓存文件夹,为 ModelScope 用户提供。
 - `om_cache`:类似 Hugging Face 缓存文件夹,为 Modelers 用户提供。
-- `data`:宿主机中存放数据集的文件夹路径。
+- `shared_data`:宿主机中存放数据集的文件夹路径。
 - `output`:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。

 </details>
@@ -1,5 +1,5 @@
 # https://hub.docker.com/r/hiyouga/pytorch/tags
-ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0
+ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0-devel
 FROM ${BASE_IMAGE}

 # Installation arguments
@@ -47,7 +47,7 @@ RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
     fi

 # Set up volumes
-VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/shared_data", "/app/output" ]

 # Expose port 7860 for LLaMA Board
 ENV GRADIO_SERVER_PORT=7860
@@ -1,6 +1,6 @@
 # Start from the pytorch official image (ubuntu-22.04 + cuda-12.4.1 + python-3.11)
 # https://hub.docker.com/r/pytorch/pytorch/tags
-FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-runtime
+FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel

 # Define environments
 ENV MAX_JOBS=16
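The switch from the `-runtime` to the `-devel` tag presumably matters because the devel images carry the CUDA build toolchain (notably `nvcc`) needed to compile extensions during `pip install`, while the runtime images do not. A quick check, assuming both tags remain pullable:

```bash
# nvcc is expected in the devel image but missing from the runtime image
docker run --rm pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel nvcc --version
docker run --rm pytorch/pytorch:2.6.0-cuda12.4-cudnn9-runtime nvcc --version  # expected to fail: nvcc not found
```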
@@ -11,7 +11,7 @@ services:
       - ../../hf_cache:/root/.cache/huggingface
       - ../../ms_cache:/root/.cache/modelscope
       - ../../om_cache:/root/.cache/openmind
-      - ../../data:/app/data
+      - ../../shared_data:/app/shared_data
       - ../../output:/app/output
     ports:
       - "7860:7860"
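A usage sketch for the compose file edited above; the `docker/docker-cuda/` location is an assumption based on the `../../` relative mounts, and the `llamafactory` service name is taken from the container name seen later in this diff:

```bash
cd docker/docker-cuda/           # assumed location of this docker-compose.yml
docker compose up -d             # mounts ./shared_data from the repository root
docker compose exec llamafactory bash
```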
@@ -4,7 +4,7 @@ FROM ${BASE_IMAGE}

 # Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
-ARG EXTRAS=metrics
+ARG EXTRAS=torch-npu,metrics
 ARG HTTP_PROXY=""

 # Define environments
@@ -39,7 +39,7 @@ COPY . /app
 RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation

 # Set up volumes
-VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/shared_data", "/app/output" ]

 # Expose port 7860 for LLaMA Board
 ENV GRADIO_SERVER_PORT=7860
@@ -5,13 +5,13 @@ services:
       context: ../..
       args:
         PIP_INDEX: https://pypi.org/simple
-        EXTRAS: metrics
+        EXTRAS: torch-npu,metrics
     container_name: llamafactory
     volumes:
       - ../../hf_cache:/root/.cache/huggingface
       - ../../ms_cache:/root/.cache/modelscope
       - ../../om_cache:/root/.cache/openmind
-      - ../../data:/app/data
+      - ../../shared_data:/app/shared_data
       - ../../output:/app/output
       - /usr/local/dcmi:/usr/local/dcmi
       - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
@@ -52,7 +52,7 @@ RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
     fi

 # Set up volumes
-VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/root/.cache/openmind", "/app/shared_data", "/app/output" ]

 # Expose port 7860 for LLaMA Board
 ENV GRADIO_SERVER_PORT=7860
@@ -11,7 +11,7 @@ services:
       - ../../hf_cache:/root/.cache/huggingface
       - ../../ms_cache:/root/.cache/modelscope
       - ../../om_cache:/root/.cache/openmind
-      - ../../data:/app/data
+      - ../../shared_data:/app/shared_data
       - ../../output:/app/output
     ports:
       - "7860:7860"
@@ -92,12 +92,4 @@ conflicts = [
     { extra = "torch-npu" },
     { extra = "sglang" },
   ],
-  [
-    { extra = "vllm" },
-    { extra = "sglang" },
-  ],
-  [
-    { extra = "sglang" },
-    { extra = "minicpm_v" },
-  ],
 ]
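The trimmed `conflicts` table is uv's declaration of extras that cannot be resolved together; the groups removed here (vllm/sglang and sglang/minicpm_v) are no longer treated as mutually exclusive, while at least one remaining group still pairs `torch-npu` with `sglang`. A hedged sketch of the effect:

```bash
# Sketch: uv refuses extras declared as conflicting in pyproject.toml
uv sync --extra torch --extra metrics       # unrelated extras resolve normally
uv sync --extra torch-npu --extra sglang    # expected to be rejected by the conflicts table
```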
setup.py (4 changed lines)
@@ -45,7 +45,7 @@ extra_require = {
     "torch": ["torch>=2.0.0", "torchvision>=0.15.0"],
     "torch-npu": ["torch==2.4.0", "torch-npu==2.4.0.post2", "decorator"],
     "metrics": ["nltk", "jieba", "rouge-chinese"],
-    "deepspeed": ["deepspeed>=0.10.0,<=0.16.5"],
+    "deepspeed": ["deepspeed>=0.10.0,<=0.16.9"],
     "liger-kernel": ["liger-kernel>=0.5.5"],
     "bitsandbytes": ["bitsandbytes>=0.39.0"],
     "hqq": ["hqq"],
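These `extra_require` entries map to the bracketed extras used in the README install command, so the ceiling bump means the deepspeed extra now resolves within deepspeed>=0.10.0,<=0.16.9. A sketch of how an extra is consumed (the extras combination shown is illustrative, not taken from the commit):

```bash
# Installing the deepspeed extra picks up the new upper bound after this change
pip install -e ".[torch,metrics,deepspeed]" --no-build-isolation
```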
@@ -58,7 +58,6 @@ extra_require = {
     "apollo": ["apollo-torch"],
     "badam": ["badam>=1.2.1"],
     "adam-mini": ["adam-mini"],
-    "qwen": ["transformers_stream_generator"],
     "minicpm_v": [
         "soundfile",
         "torchvision",
@@ -68,7 +67,6 @@ extra_require = {
         "msgpack",
         "referencing",
         "jsonschema_specifications",
-        "transformers==4.48.3",
     ],
     "modelscope": ["modelscope"],
     "openmind": ["openmind"],