Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-15 03:10:35 +08:00)
Update tuner.py
@@ -65,7 +65,7 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None:
         raise ValueError("The model is not a `PreTrainedModel`, export aborted.")

     if getattr(model, "quantization_method", None) is None:  # cannot convert dtype of a quantized model
-        output_dtype = torch.float16
+        output_dtype = getattr(model.config, "torch_dtype", torch.float16)
         setattr(model.config, "torch_dtype", output_dtype)
         model = model.to(output_dtype)

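In effect, export_model no longer forces every non-quantized model to float16: it now reuses the dtype recorded in the model's config and only falls back to float16 when no torch_dtype is set. The sketch below is a hypothetical standalone helper (not part of LLaMA-Factory) that mirrors this dtype-selection logic, assuming a Hugging Face style model object with a `config` attribute and an optional `quantization_method` attribute.

    import torch

    def resolve_export_dtype(model):
        # Quantized models are skipped entirely; their dtype cannot be converted.
        if getattr(model, "quantization_method", None) is not None:
            return None
        # Otherwise, keep the dtype the model was trained/loaded with,
        # falling back to float16 if the config does not record one.
        return getattr(model.config, "torch_dtype", torch.float16)

For example, a model loaded in bfloat16 would now be exported in bfloat16 instead of being silently cast to float16.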