Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-11-04 18:02:19 +08:00
	[data] Fix wrong position ids with packed attention masks (#7754)
Co-authored-by: hoshi-hiyouga <hiyouga@buaa.edu.cn>
parent 0ac641326b
commit bd7bc31c79
@@ -176,7 +176,7 @@ class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
                 "input_ids": features["input_ids"],
                 "image_grid_thw": mm_inputs.get("image_grid_thw"),
                 "video_grid_thw": mm_inputs.get("video_grid_thw"),
-                "attention_mask": features["attention_mask"],
+                "attention_mask": (features["attention_mask"] >= 1).float(),
             }
             if "second_per_grid_ts" in mm_inputs:  # for qwen2vl
                 rope_index_kwargs["second_per_grid_ts"] = mm_inputs.get("second_per_grid_ts")
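The one-line change binarizes the attention mask before it reaches the RoPE index computation. With packing enabled, the collated attention_mask can tag each packed sequence with its own index (1, 2, 3, ...) rather than plain ones, and position-id code that expects a 0/1 mask then derives wrong position ids. A minimal sketch of the effect, with illustrative tensor values that are assumed here rather than taken from the repository:

import torch

# Illustrative packed mask (values assumed, not from the repo): each
# packed sequence carries its own index (1, 2, 3, ...), padding is 0.
attention_mask = torch.tensor([[1, 1, 1, 2, 2, 3, 0, 0]])

# Position-id logic such as Qwen2-VL's get_rope_index expects a binary
# 0/1 mask, so any value >= 1 must be collapsed to 1 first.
binary_mask = (attention_mask >= 1).float()
print(binary_mask)  # tensor([[1., 1., 1., 1., 1., 1., 0., 0.]])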