mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-14 10:56:56 +08:00)
@@ -1,4 +1,5 @@
#!/bin/bash
# DO NOT use a quantized model or quantization_bit when merging LoRA weights

CUDA_VISIBLE_DEVICES= python ../../src/export_model.py \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
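The hunk cuts off mid-command, after --model_name_or_path. For readers who want the shape of a complete merge invocation, the sketch below keeps the two lines shown in the diff and fills in the remaining flags as illustrative assumptions: --adapter_name_or_path, --template, --finetuning_type, and --export_dir do not appear in the hunk above and may differ from the actual script. Two points the script relies on: merging must happen in full precision, since a quantized base cannot be exactly recombined with the LoRA delta, and the empty CUDA_VISIBLE_DEVICES= hides all GPUs so the merge runs on CPU.

#!/bin/bash
# DO NOT use a quantized model or quantization_bit when merging LoRA weights.
# CUDA_VISIBLE_DEVICES= (empty value) hides all GPUs, forcing the merge onto CPU.
# Every flag after --model_name_or_path is a hypothetical placeholder;
# the diff above truncates before them.
CUDA_VISIBLE_DEVICES= python ../../src/export_model.py \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --adapter_name_or_path path/to/lora/checkpoint \
    --template default \
    --finetuning_type lora \
    --export_dir path/to/merged/model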