mirror of https://github.com/hiyouga/LLaMA-Factory.git
synced 2025-08-05 05:02:50 +08:00
Update trainer.py
Former-commit-id: 6700a1b9fa0cbd965ac45d3f2de1088727235c25
parent b92f690190
commit 5226c4fa97
@@ -1,5 +1,6 @@
 import json
 import os
+from types import MethodType
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
@@ -9,8 +10,7 @@ from transformers import Seq2SeqTrainer
 from ...extras.constants import IGNORE_INDEX
 from ...extras.logging import get_logger
 from ..utils import create_custom_optimzer, create_custom_scheduler
-from types import MethodType
-from packaging import version
+
 
 if TYPE_CHECKING:
     from transformers.trainer import PredictionOutput
@@ -31,6 +31,7 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         self.finetuning_args = finetuning_args
         if finetuning_args.use_badam:
             from badam import clip_grad_norm_for_sparse_tensor
+
             self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator)
 
     def create_optimizer(self) -> "torch.optim.Optimizer":
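Note on the last hunk: MethodType is what lets BAdam's clip_grad_norm_for_sparse_tensor replace clip_grad_norm_ on the live accelerator object, so gradient clipping copes with BAdam's sparse updates without subclassing Accelerator. A minimal, runnable sketch of that binding pattern follows; the Accelerator class and return strings below are illustrative stand-ins, not the real accelerate or BAdam code.

from types import MethodType


class Accelerator:
    # Hypothetical stand-in for accelerate.Accelerator, only for illustration.
    def clip_grad_norm_(self, parameters, max_norm):
        return "dense clipping"


def clip_grad_norm_for_sparse_tensor(self, parameters, max_norm):
    # Once bound with MethodType, `self` is the Accelerator instance,
    # exactly as if this function had been defined on the class.
    return "sparse-aware clipping"


accelerator = Accelerator()
# Rebind on this single instance, mirroring the trainer's __init__ above:
accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, accelerator)
assert accelerator.clip_grad_norm_([], max_norm=1.0) == "sparse-aware clipping"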
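The hunk's trailing context line, def create_optimizer, is the other integration point: the trainer overrides it to route optimizer construction through create_custom_optimzer (the misspelled name is the actual upstream identifier) before falling back to the default. A sketch of that delegation pattern, assuming a factory that returns None when no custom optimizer is requested; the classes and signatures here are stand-ins, not the verified LLaMA-Factory code.

from typing import Optional


def create_custom_optimzer(model, args, finetuning_args) -> Optional[str]:
    # Assumed behavior: return a custom optimizer (e.g. for BAdam) or None
    # to signal that the stock optimizer should be built instead.
    return "custom-optimizer" if finetuning_args.get("use_badam") else None


class BaseTrainer:
    # Stand-in for transformers.Seq2SeqTrainer's default create_optimizer.
    def create_optimizer(self):
        if self.optimizer is None:
            self.optimizer = "default-optimizer"
        return self.optimizer


class CustomSeq2SeqTrainer(BaseTrainer):
    def __init__(self, finetuning_args):
        self.optimizer = None
        self.finetuning_args = finetuning_args

    def create_optimizer(self):
        # Try the custom factory first; fall back to the parent otherwise.
        if self.optimizer is None:
            self.optimizer = create_custom_optimzer(None, None, self.finetuning_args)
        return super().create_optimizer()


print(CustomSeq2SeqTrainer({"use_badam": True}).create_optimizer())   # custom-optimizer
print(CustomSeq2SeqTrainer({"use_badam": False}).create_optimizer())  # default-optimizer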