#!/bin/bash
# DO NOT use a quantized model or set quantization_bit when merging LoRA weights.
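# export_size:          maximum size (in GB) of each saved model shard
# export_device:        device used to merge the weights (cpu avoids occupying the GPU)
# export_legacy_format: False saves safetensors shards instead of legacy .bin files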
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \
    --template default \
    --finetuning_type lora \
    --export_dir ../../models/llama2-7b-sft \
    --export_size 2 \
    --export_device cpu \
    --export_legacy_format False
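
# Optional follow-up (a minimal sketch, assuming the export above has finished and the
# paths are unchanged): the merged checkpoint in export_dir is a standalone model, so it
# can be used for inference directly, with no adapter_name_or_path needed.
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat \
    --model_name_or_path ../../models/llama2-7b-sft \
    --template default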