Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-23 15:20:36 +08:00)
[misc] lint (#9636)
.gitignore (vendored, 2 lines changed)
@@ -85,7 +85,7 @@ ipython_config.py
 # pyenv
 # For a library or package, you might want to ignore these files since the code is
 # intended to run in multiple environments; otherwise, check them in:
-# .python-version
+.python-version
 
 # pipenv
 # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
@@ -1624,7 +1624,12 @@ class Qwen3VLPlugin(Qwen2VLPlugin):
                 for video, duration in zip(videos["videos"], videos["durations"])
             ]
             mm_inputs.update(
-                video_processor(videos=videos["videos"], video_metadata=video_metadata, fps=getattr(processor, "video_fps", 2.0), return_metadata=True)
+                video_processor(
+                    videos=videos["videos"],
+                    video_metadata=video_metadata,
+                    fps=getattr(processor, "video_fps", 2.0),
+                    return_metadata=True,
+                )
             )
         temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
         if "second_per_grid_ts" in processor.model_input_names:
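Note on the hunk above: the lint only re-wraps the long video_processor call into one keyword argument per line; behavior is unchanged. For context, a minimal self-contained sketch of the two patterns the call relies on, the getattr fps fallback and the per-video metadata built by zipping videos with durations. The dict keys below are illustrative stand-ins, not the plugin's real metadata type:

# Minimal sketch (not LLaMA-Factory code); DummyProcessor is hypothetical.
class DummyProcessor:
    pass  # no video_fps attribute set

processor = DummyProcessor()
fps = getattr(processor, "video_fps", 2.0)  # falls back to 2.0 when unset
print(fps)  # 2.0

videos = {"videos": ["clip_a", "clip_b"], "durations": [4.0, 6.5]}
# mirrors: [... for video, duration in zip(videos["videos"], videos["durations"])]
video_metadata = [
    {"duration": duration, "fps": fps, "total_num_frames": int(duration * fps)}
    for video, duration in zip(videos["videos"], videos["durations"])
]
print(video_metadata)  # one metadata entry per video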
@@ -15,32 +15,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING
 
 import torch
-import torch.nn.functional as F
-from transformers import Trainer
-from trl import DPOTrainer
-from trl.trainer import disable_dropout_in_model
+from ktransformers.sft.lora import KTrainer  # type: ignore
 from typing_extensions import override
 
-from ...extras.constants import IGNORE_INDEX
-from ...extras.packages import is_transformers_version_greater_than
-from ..callbacks import SaveProcessorCallback
-from ..trainer_utils import create_custom_optimizer, create_custom_scheduler, get_batch_logps, nested_detach
-from .trainer import CustomDPOTrainer as BaseDPOTrainer
-from ktransformers.sft.lora import KTrainer
+from ..trainer_utils import get_batch_logps, nested_detach
+from .trainer import CustomDPOTrainer
 
 
 if TYPE_CHECKING:
-    from transformers import PreTrainedModel, ProcessorMixin
-
-    from ...hparams import FinetuningArguments
+    from transformers import PreTrainedModel
 
 
-class CustomDPOTrainer(KTrainer, BaseDPOTrainer):
+class KDPOTrainer(KTrainer, CustomDPOTrainer):
     @override
     def concatenated_forward(
-        self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], is_ref_model: bool = False
+        self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], is_ref_model: bool = False
     ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"]:
         r"""Compute the sum log probabilities of the labels under given logits if loss_type is not IPO, ORPO or SimPO.
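Note on the rename above: KDPOTrainer inherits from both KTrainer and CustomDPOTrainer, so Python's C3 linearization (the MRO) decides which base supplies each method, searching bases left to right. A minimal sketch with hypothetical stand-in classes:

# Toy classes only; the real KTrainer and CustomDPOTrainer are far larger.
class CustomDPOTrainer:
    def training_step(self):
        return "dpo training step"

    def concatenated_forward(self):
        return "dpo forward"

class KTrainer:
    def training_step(self):
        return "ktransformers training step"  # wins: KTrainer is listed first

class KDPOTrainer(KTrainer, CustomDPOTrainer):
    pass

trainer = KDPOTrainer()
print(KDPOTrainer.__mro__)             # KDPOTrainer -> KTrainer -> CustomDPOTrainer -> object
print(trainer.training_step())         # "ktransformers training step"
print(trainer.concatenated_forward())  # "dpo forward", untouched by KTrainer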
@@ -48,9 +40,8 @@ class CustomDPOTrainer(KTrainer, BaseDPOTrainer):
         """
         if self.finetuning_args.use_ref_model:
             batch = nested_detach(batch, clone=True)  # avoid error
-        labels = batch["labels"]
-        # dpo not need compute loss in forward, waste mem
-        del batch["labels"]
+
+        labels = batch.pop("labels")  # dpo does not need to compute loss in forward
         all_logits: torch.Tensor = model(**batch, return_dict=True, use_cache=False).logits.to(torch.float32)
         all_logits = all_logits.to("cpu")
         labels = labels.to(all_logits.device)
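The labels handling above collapses a read-plus-delete into a single batch.pop. Popping before model(**batch) matters because HF-style forwards compute a loss whenever labels is present, which wastes memory during DPO, where log-probs are recomputed from logits anyway. A toy sketch (toy_forward is hypothetical, mimicking that internal branch):

from typing import Optional

import torch
import torch.nn.functional as F

def toy_forward(input_ids: torch.Tensor, labels: Optional[torch.Tensor] = None) -> dict:
    logits = input_ids.float()  # stand-in for real transformer logits
    loss = None
    if labels is not None:      # HF models branch like this internally
        loss = F.mse_loss(logits, labels.float())
    return {"logits": logits, "loss": loss}

batch = {"input_ids": torch.tensor([[1, 2, 3]]), "labels": torch.tensor([[1, 2, 3]])}
labels = batch.pop("labels")  # same effect as: labels = batch["labels"]; del batch["labels"]
out = toy_forward(**batch)
assert out["loss"] is None    # no wasted loss computation inside the forward pass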
@@ -218,9 +218,10 @@ class CustomDPOTrainer(DPOTrainer):
         if self.finetuning_args.use_ref_model:
             batch = nested_detach(batch, clone=True)  # avoid error
 
+        labels = batch.pop("labels")  # dpo does not need to compute loss in forward
         all_logits: torch.Tensor = model(**batch, return_dict=True, use_cache=False).logits.to(torch.float32)
         all_logps, valid_length = get_batch_logps(
-            logits=all_logits, labels=batch["labels"], ld_alpha=(self.ld_alpha if not is_ref_model else None)
+            logits=all_logits, labels=labels, ld_alpha=(self.ld_alpha if not is_ref_model else None)
         )
         if self.loss_type in ["ipo", "orpo", "simpo"]:
             all_logps = all_logps / valid_length
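For context on get_batch_logps: it returns summed per-token log-probabilities and the count of valid label tokens, and the ipo/orpo/simpo branch converts sums into per-token averages. A minimal sketch of that aggregation (an assumption about the helper's internals; the real one also applies IGNORE_INDEX masking, omitted here):

import torch

logits = torch.randn(2, 5, 10)          # (batch, seq, vocab)
labels = torch.randint(0, 10, (2, 5))   # (batch, seq)
logps = torch.log_softmax(logits, dim=-1)
per_token = torch.gather(logps, 2, labels.unsqueeze(-1)).squeeze(-1)
all_logps = per_token.sum(-1)           # summed log-probs, as DPO uses them
valid_length = torch.full((2,), 5.0)    # here every token counts
avg_logps = all_logps / valid_length    # the ipo/orpo/simpo branch normalizes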
@@ -62,13 +62,13 @@ def run_dpo(
     else:
         ref_model = None
 
     if model_args.use_kt:
-        from ktransformers.util.globals import GLOBAL_CONFIG
+        from ktransformers.util.globals import GLOBAL_CONFIG  # type: ignore
+
+        from .ktrainer import KDPOTrainer as CustomDPOTrainer
 
         GLOBAL_CONFIG._config["mod"] = "sft"
-
-        from .ktrainer import CustomDPOTrainer
     else:
         from .trainer import CustomDPOTrainer
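The import shuffle above keeps the alias pattern intact: both branches bind the name CustomDPOTrainer, so everything after the if/else is backend-agnostic. A toy sketch with stand-in classes (use_kt mimics model_args.use_kt; the real branches import from .trainer and .ktrainer):

use_kt = True  # stand-in for model_args.use_kt

class _TrainerBackend:   # stand-in for .trainer.CustomDPOTrainer
    name = "default"

class _KTrainerBackend:  # stand-in for .ktrainer.KDPOTrainer
    name = "ktransformers"

if use_kt:
    CustomDPOTrainer = _KTrainerBackend  # like: from .ktrainer import KDPOTrainer as CustomDPOTrainer
else:
    CustomDPOTrainer = _TrainerBackend   # like: from .trainer import CustomDPOTrainer

print(CustomDPOTrainer().name)  # downstream code never needs to know which backend won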
@@ -99,8 +99,8 @@ def run_sft(
 
     # Initialize our Trainer
     if model_args.use_kt:
-        from ktransformers.util.globals import GLOBAL_CONFIG
-        from ktransformers.sft.lora import KTrainer
+        from ktransformers.sft.lora import KTrainer  # type: ignore
+        from ktransformers.util.globals import GLOBAL_CONFIG  # type: ignore
 
         GLOBAL_CONFIG._config["mod"] = "sft"