Update trainer.py
Former-commit-id: c6163be1444c00dd000f288e2f834968bd932981
commit 5978427ae0
parent c7c216069c
@@ -1,5 +1,6 @@
 import json
 import os
+from types import MethodType
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
@@ -9,8 +10,7 @@ from transformers import Seq2SeqTrainer
 from ...extras.constants import IGNORE_INDEX
 from ...extras.logging import get_logger
 from ..utils import create_custom_optimzer, create_custom_scheduler
-from types import MethodType
-from packaging import version
+
 
 if TYPE_CHECKING:
     from transformers.trainer import PredictionOutput
@@ -31,6 +31,7 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         self.finetuning_args = finetuning_args
         if finetuning_args.use_badam:
             from badam import clip_grad_norm_for_sparse_tensor
+
             self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator)
 
     def create_optimizer(self) -> "torch.optim.Optimizer":
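The substance of this commit is the MethodType patch above: when BAdAM is enabled, the trainer swaps the gradient-clipping routine on its Accelerator instance for BAdAM's sparse-aware one. types.MethodType(func, obj) binds a plain function to a single object as a bound method, so only this trainer's accelerator is affected; the Accelerator class and any other instances keep their stock clip_grad_norm_. A minimal sketch of the pattern, with a hypothetical sparse_aware_clip_grad_norm_ standing in for badam's clip_grad_norm_for_sparse_tensor:

# Sketch only: bind a replacement gradient-clipping function onto a single
# Accelerator instance. sparse_aware_clip_grad_norm_ is a hypothetical
# stand-in for badam's clip_grad_norm_for_sparse_tensor.
from types import MethodType

import torch
from accelerate import Accelerator


def sparse_aware_clip_grad_norm_(self, parameters, max_norm, norm_type=2):
    # `self` is the Accelerator the function gets bound to, so the replacement
    # can still use its helpers (e.g. unscaling fp16 gradients) before clipping.
    self.unscale_gradients()
    # A sparse-aware implementation would treat partially updated tensors
    # specially here; this sketch just falls back to the dense norm clip.
    return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)


accelerator = Accelerator()
# MethodType(func, obj) produces a bound method: rebinding affects only this
# instance, leaving the Accelerator class and other instances untouched.
accelerator.clip_grad_norm_ = MethodType(sparse_aware_clip_grad_norm_, accelerator)

Patching at the accelerator level works because the HF Trainer's inner loop clips gradients via self.accelerator.clip_grad_norm_(...), so the override takes effect without modifying transformers itself.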
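The final hunk is cut off by the diff viewer at the create_optimizer signature. For orientation, a sketch of how such an override typically continues, given the create_custom_optimzer / create_custom_scheduler imports above (assumed body, not the commit's verbatim contents):

# Assumed continuation (illustrative only): build a task-specific optimizer
# from finetuning_args if one applies, otherwise defer to the parent's default.
def create_optimizer(self) -> "torch.optim.Optimizer":
    if self.optimizer is None:
        self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args)
    return super().create_optimizer()

create_custom_scheduler would presumably be wired into a matching create_scheduler override the same way.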