Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-14 23:58:11 +08:00)
tiny fix

Former-commit-id: c6747a39dbbdda8decaa104499918bc7ac5f02e4
Commit: 5a7cb9af4e (parent: d1cda4ec68)
@@ -43,7 +43,7 @@ def create_top() -> Dict[str, "Component"]:
     with gr.Accordion(open=False) as advanced_tab:
         with gr.Row():
-            quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none", scale=1)
+            quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none", allow_custom_value=True, scale=1)
             quantization_method = gr.Dropdown(choices=["bitsandbytes", "hqq", "eetq"], value="bitsandbytes", scale=1)
             template = gr.Dropdown(choices=list(TEMPLATES.keys()), value="default", scale=1)
             rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none", scale=2)
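For context on this hunk: in Gradio, allow_custom_value=True lets a Dropdown hold a value that is not in its current choices list, which matters once the choice list is swapped at runtime as the second hunk below does. A minimal standalone sketch, not the project's actual UI code (the demo block is made up for illustration):

import gradio as gr

# Minimal sketch: the dropdown starts with the bitsandbytes bit options but
# tolerates values outside that list because allow_custom_value=True.
with gr.Blocks() as demo:
    quantization_bit = gr.Dropdown(
        choices=["none", "8", "4"],   # initial options
        value="none",
        allow_custom_value=True,      # do not reject values missing from choices
    )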
@@ -61,11 +61,13 @@ def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
     Returns the available quantization bits.
     """
     if quantization_method == QuantizationMethod.BITS_AND_BYTES.value:
-        return gr.Dropdown(choices=["none", "8", "4"], value="none")
+        available_bits = ["none", "8", "4"]
     elif quantization_method == QuantizationMethod.HQQ.value:
-        return gr.Dropdown(choices=["none", "8", "6", "5", "4", "3", "2", "1"], value="none")
+        available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"]
     elif quantization_method == QuantizationMethod.EETQ.value:
-        return gr.Dropdown(choices=["none", "8"], value="none")
+        available_bits = ["none", "8"]
+
+    return gr.Dropdown(choices=available_bits)


 def change_stage(training_stage: str = list(TRAINING_STAGES.keys())[0]) -> Tuple[List[str], bool]:
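Taken together, can_quantize_to() now only swaps the choice list and no longer resets value="none" on every method change, which is presumably why quantization_bit gained allow_custom_value=True above: the previously selected bit width may be absent from the new list. A self-contained sketch of how the function reads after this commit, with a stand-in QuantizationMethod enum because the real import lies outside the hunk:

from enum import Enum

import gradio as gr


class QuantizationMethod(str, Enum):
    # Stand-in for the enum used in the real module; values mirror the
    # quantization_method dropdown choices from the first hunk.
    BITS_AND_BYTES = "bitsandbytes"
    HQQ = "hqq"
    EETQ = "eetq"


def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
    r"""
    Returns the available quantization bits.
    """
    if quantization_method == QuantizationMethod.BITS_AND_BYTES.value:
        available_bits = ["none", "8", "4"]
    elif quantization_method == QuantizationMethod.HQQ.value:
        available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"]
    elif quantization_method == QuantizationMethod.EETQ.value:
        available_bits = ["none", "8"]

    return gr.Dropdown(choices=available_bits)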