Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-15 03:10:35 +08:00)
fix version checking

README.md (18 changed lines)
@@ -502,10 +502,13 @@ use_cpu: false
 </details>
 
+> [!TIP]
+> We recommend using Accelerate for LoRA tuning.
+
 #### Use DeepSpeed
 
 ```bash
-deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
+deepspeed --num_gpus 8 src/train_bash.py \
     --deepspeed ds_config.json \
     ... # arguments (same as above)
 ```
 
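The tip added above recommends Accelerate for LoRA tuning, while the hunk only shows the DeepSpeed launcher. A minimal sketch of an equivalent Accelerate launch, not part of this commit, assuming a config file has already been created with `accelerate config` (the file name `accelerate_config.yaml` is illustrative):

```bash
# Sketch only (not part of this commit): launch the same training script
# with Accelerate instead of the deepspeed launcher.
# Assumes accelerate_config.yaml was generated beforehand via `accelerate config`.
accelerate launch --config_file accelerate_config.yaml src/train_bash.py \
    ... # arguments (same as above)
```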
@@ -522,25 +525,32 @@ deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
   "fp16": {
     "enabled": "auto",
     "loss_scale": 0,
-    "initial_scale_power": 16,
     "loss_scale_window": 1000,
+    "initial_scale_power": 16,
     "hysteresis": 2,
     "min_loss_scale": 1
   },
+  "bf16": {
+    "enabled": "auto"
+  },
   "zero_optimization": {
     "stage": 2,
     "allgather_partitions": true,
     "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
     "reduce_scatter": true,
     "reduce_bucket_size": 5e8,
-    "overlap_comm": false,
-    "contiguous_gradients": true
+    "contiguous_gradients": true,
+    "round_robin_gradients": true
   }
 }
 ```
 
 </details>
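As context for the config changes above: the hunk adds a comma after `"contiguous_gradients": true` so that the new `"round_robin_gradients"` entry can follow it, and a syntax error in `ds_config.json` would otherwise only surface at launch time. A quick sanity check before training, assuming the file sits in the working directory and a Python interpreter is available:

```bash
# Sketch only (not part of this commit): validate ds_config.json before
# passing it to --deepspeed; json.tool exits non-zero on a JSON syntax error.
python -m json.tool ds_config.json > /dev/null && echo "ds_config.json is valid JSON"
```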
 
+> [!TIP]
+> Refer to [examples](examples) for more training scripts.
+
 ### Merge LoRA weights and export model
 
 ```bash