From 433a96b8c2442257dd3febb189e1f167a9df9e64 Mon Sep 17 00:00:00 2001
From: codemayq
Date: Wed, 23 Aug 2023 10:08:17 +0800
Subject: [PATCH] fix quantization bit is ""

Former-commit-id: a7cc6c4140c23f3b41985a481af69964b87e0feb
---
 src/llmtuner/webui/runner.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py
index 5793001a..9e13a651 100644
--- a/src/llmtuner/webui/runner.py
+++ b/src/llmtuner/webui/runner.py
@@ -112,7 +112,7 @@ class Runner:
             overwrite_cache=True,
             checkpoint_dir=checkpoint_dir,
             finetuning_type=finetuning_type,
-            quantization_bit=int(quantization_bit) if quantization_bit & quantization_bit != "None" else None,
+            quantization_bit=int(quantization_bit) if quantization_bit and quantization_bit != "None" else None,
             template=template,
             system_prompt=system_prompt,
             dataset_dir=dataset_dir,
@@ -196,7 +196,7 @@ class Runner:
             predict_with_generate=True,
             checkpoint_dir=checkpoint_dir,
             finetuning_type=finetuning_type,
-            quantization_bit=int(quantization_bit) if quantization_bit & quantization_bit != "None" else None,
+            quantization_bit=int(quantization_bit) if quantization_bit and quantization_bit != "None" else None,
             template=template,
             system_prompt=system_prompt,
             dataset_dir=dataset_dir,
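
Note (not part of the original patch): a minimal sketch of why the old condition broke.
Python's bitwise `&` binds tighter than `!=`, so the old expression evaluated
`quantization_bit & quantization_bit` first, which raises TypeError for string inputs
(the Web UI passes values such as "" or "None") instead of short-circuiting like `and`.
The helper name below is hypothetical and exists only for illustration.

    # Hypothetical helper, not part of llmtuner; shows the corrected check.
    def parse_quantization_bit(quantization_bit: str):
        # Old: quantization_bit & quantization_bit != "None"
        #   parses as (quantization_bit & quantization_bit) != "None"
        #   and raises TypeError on strings rather than handling "".
        # New: boolean short-circuit plus comparison against "None".
        if quantization_bit and quantization_bit != "None":
            return int(quantization_bit)
        return None

    assert parse_quantization_bit("") is None
    assert parse_quantization_bit("None") is None
    assert parse_quantization_bit("4") == 4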