Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-01 11:12:50 +08:00)
[assets] update wechat (#8385)
This commit is contained in: parent 1cfe42916d, commit bb84c3c83e
@@ -5,7 +5,7 @@
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
[](https://pypi.org/project/llamafactory/)
-[](https://scholar.google.com/scholar?cites=12620864006390196564)
+[](https://scholar.google.com/scholar?cites=12620864006390196564)
[](https://hub.docker.com/r/hiyouga/llamafactory/tags)

[](https://twitter.com/llamafactory_ai)
@@ -55,7 +55,7 @@ Choose your path:
- **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
- **Local machine**: Please refer to [usage](#getting-started)
- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
-- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory
+- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory

> [!NOTE]
> Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them.
@@ -5,7 +5,7 @@
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
[](https://pypi.org/project/llamafactory/)
-[](https://scholar.google.com/scholar?cites=12620864006390196564)
+[](https://scholar.google.com/scholar?cites=12620864006390196564)
[](https://hub.docker.com/r/hiyouga/llamafactory/tags)

[](https://twitter.com/llamafactory_ai)
@@ -41,7 +41,7 @@

</div>

-👋 Join our [WeChat group](assets/wechat.jpg), [NPU user group](assets/wechat_npu.jpg), or the [Alaya NeW compute deal group](assets/wechat_alaya.png).
+👋 Join our [WeChat group](assets/wechat.jpg), [NPU user group](assets/wechat_npu.jpg), or the [九章智算云 compute deal group](assets/wechat_alaya.png).

\[ [English](README.md) | Chinese \]
@@ -57,7 +57,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
- **Colab (free)**: https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
- **Local machine**: please refer to [usage](#如何使用)
- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
-- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory
+- **九章智算云 (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory

> [!NOTE]
> All websites other than the links above are unauthorized third-party websites. Please use them with caution.
Binary file not shown. Before: 169 KiB, After: 168 KiB.
Binary file not shown. Before: 167 KiB, After: 168 KiB.
@@ -204,7 +204,12 @@ class RLHFArguments:
    )
    ld_alpha: Optional[float] = field(
        default=None,
-        metadata={"help": "α parameter from the LD-DPO paper, which controls the weighting of the verbose token log-probabilities in responses"},
+        metadata={
+            "help": (
+                "Alpha parameter from the LD-DPO paper, which controls the weighting of"
+                " the verbose token log-probabilities in responses."
+            )
+        },
    )

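The help text above describes the LD-DPO weighting only in words. Below is a minimal, self-contained sketch of that idea, assuming the LD-DPO formulation in which tokens beyond the length shared by both paired responses (the "public" part) have their log-probabilities scaled by alpha; the function name and the way the public length is supplied are illustrative, not the repository's implementation.

```python
import torch


def ld_weighted_logprob(token_logps: torch.Tensor, public_len: int, alpha: float) -> torch.Tensor:
    """Illustrative LD-DPO weighting for a single response.

    token_logps: per-token log-probabilities of the response, shape [response_len].
    public_len: number of leading tokens shared in length with the paired response.
    alpha: weight in [0, 1] for the remaining "verbose" tokens (alpha=1 recovers vanilla DPO).
    """
    head = token_logps[:public_len].sum()           # fully weighted part
    tail = token_logps[public_len:].sum() * alpha   # verbose part, down-weighted
    return head + tail
```

With alpha below 1, unusually long responses gain less from their extra tokens, which is the length-desensitizing effect the `ld_alpha` hyperparameter controls.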
@@ -188,8 +188,9 @@ class CustomDPOTrainer(DPOTrainer):
        batch = nested_detach(batch, clone=True)  # avoid error

        all_logits: torch.Tensor = model(**batch, return_dict=True, use_cache=False).logits.to(torch.float32)
-        all_logps, valid_length = get_batch_logps(logits=all_logits, labels=batch["labels"],
-                                                  ld_alpha=(self.ld_alpha if not is_ref_model else None))
+        all_logps, valid_length = get_batch_logps(
+            logits=all_logits, labels=batch["labels"], ld_alpha=(self.ld_alpha if not is_ref_model else None)
+        )
        if self.loss_type in ["ipo", "orpo", "simpo"]:
            all_logps = all_logps / valid_length
@@ -219,8 +220,9 @@ class CustomDPOTrainer(DPOTrainer):
            ref_context = nullcontext()

        with torch.no_grad(), ref_context:
-            reference_chosen_logps, reference_rejected_logps, *_ = self.concatenated_forward(ref_model, batch,
-                                                                                              is_ref_model=True)
+            reference_chosen_logps, reference_rejected_logps, *_ = self.concatenated_forward(
+                ref_model, batch, is_ref_model=True
+            )

        return reference_chosen_logps, reference_rejected_logps
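For orientation, the reference log-probabilities returned here are consumed by the preference loss. The sketch below shows the standard sigmoid-based DPO objective built from policy and reference chosen/rejected log-probabilities; `beta` and the tensor names follow generic DPO conventions and are not code from this diff.

```python
import torch
import torch.nn.functional as F


def dpo_sigmoid_loss(
    policy_chosen_logps: torch.Tensor,
    policy_rejected_logps: torch.Tensor,
    reference_chosen_logps: torch.Tensor,
    reference_rejected_logps: torch.Tensor,
    beta: float = 0.1,
) -> torch.Tensor:
    # Implicit rewards: log-probability ratios between the policy and the frozen reference.
    chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps)
    # Maximize the margin between chosen and rejected implicit rewards.
    return -F.logsigmoid(chosen_rewards - rejected_rewards).mean()
```

Because the reference pass runs under `torch.no_grad()`, only the policy terms contribute gradients.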
@@ -585,7 +585,10 @@ def create_custom_scheduler(


def get_batch_logps(
-    logits: "torch.Tensor", labels: "torch.Tensor", label_pad_token_id: int = IGNORE_INDEX, ld_alpha: Optional[float] = None
+    logits: "torch.Tensor",
+    labels: "torch.Tensor",
+    label_pad_token_id: int = IGNORE_INDEX,
+    ld_alpha: Optional[float] = None,
) -> tuple["torch.Tensor", "torch.Tensor"]:
    r"""Compute the log probabilities of the given labels under the given logits.
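A minimal sketch of what a function with this signature conventionally computes: gather the log-probability of each label token, mask padding positions, and return per-sequence sums together with the number of valid tokens. The label shifting, the padding convention, and the chosen-first batch layout assumed in the `ld_alpha` branch are illustrative assumptions, not necessarily the repository's exact implementation.

```python
from typing import Optional

import torch

IGNORE_INDEX = -100  # assumed padding value for label positions that should not contribute


def get_batch_logps_sketch(
    logits: torch.Tensor,                 # [batch, seq_len, vocab]
    labels: torch.Tensor,                 # [batch, seq_len]
    label_pad_token_id: int = IGNORE_INDEX,
    ld_alpha: Optional[float] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    # Predict token t from positions before t.
    labels = labels[:, 1:].clone()
    logits = logits[:, :-1, :]
    loss_mask = labels != label_pad_token_id
    labels[labels == label_pad_token_id] = 0  # any valid index; masked out below

    # Log-probability of each target token under the model.
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    per_token_logps = per_token_logps * loss_mask
    valid_length = loss_mask.sum(-1)

    if ld_alpha is not None:
        # LD-DPO-style down-weighting (assumes chosen examples occupy the first half of the
        # batch and rejected the second half): tokens past the shared "public" length are
        # scaled by ld_alpha, as sketched earlier.
        half = labels.size(0) // 2
        public_length = torch.min(valid_length[:half], valid_length[half:]).repeat(2)
        token_rank = loss_mask.long().cumsum(dim=-1)  # 1-based position among valid tokens
        verbose = (token_rank > public_length.unsqueeze(1)) & loss_mask
        per_token_logps = torch.where(verbose, per_token_logps * ld_alpha, per_token_logps)

    return per_token_logps.sum(-1), valid_length
```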