Mirror of https://github.com/torvalds/linux.git
Synced 2025-11-04 02:30:34 +02:00
	drm/format-helper: Optimize 32-to-16-bpp conversion
For ease of implementation, the existing line-conversion functions for 16-bit
formats write each pixel individually. Optimize performance by writing
multiple pixels in single 64-bit and 32-bit stores.

v2:
- simplify address calculation (Jani)
- fix typo in commit message (Jocelyn)

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
Link: https://lore.kernel.org/r/20250328141709.217283-8-tzimmermann@suse.de
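For reference, the xfrm_pixel callback batched by this helper performs the per-pixel format conversion. A minimal sketch of such a callback, assuming an XRGB8888-to-RGB565 conversion (the function name is illustrative and not taken from this commit), could look like this:

#include <linux/types.h>

/*
 * Illustrative only: convert one XRGB8888 pixel to RGB565 by keeping the
 * 5/6/5 most-significant bits of each color channel. The callback actually
 * passed as xfrm_pixel is defined elsewhere in the DRM format helpers.
 */
static inline u32 example_xrgb8888_to_rgb565(u32 pix)
{
	u32 val = 0;

	val |= (pix & 0x00f80000) >> 8;	/* red:   bits 23:19 -> 15:11 */
	val |= (pix & 0x0000fc00) >> 5;	/* green: bits 15:10 -> 10:5  */
	val |= (pix & 0x000000f8) >> 3;	/* blue:  bits  7:3  ->  4:0  */

	return val;
}

A line-conversion wrapper would then pass such a callback to drm_fb_xfrm_line_32to16() for a whole scanline, e.g. drm_fb_xfrm_line_32to16(dst, src, width, example_xrgb8888_to_rgb565), where dst, src and width are hypothetical names for the destination scanline, source scanline and pixel count.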
This commit is contained in:

parent 58523a25cb
commit 3f31a017dd

1 changed file with 36 additions and 1 deletion
@@ -265,10 +265,45 @@ static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf
 						    unsigned int pixels,
 						    u32 (*xfrm_pixel)(u32))
 {
-	__le16 *dbuf16 = dbuf;
+	__le64 *dbuf64 = dbuf;
+	__le32 *dbuf32;
+	__le16 *dbuf16;
 	const __le32 *sbuf32 = sbuf;
 	const __le32 *send32 = sbuf32 + pixels;
 
+#if defined(CONFIG_64BIT)
+	/* write 4 pixels at once */
+	while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+		u32 pix[4] = {
+			le32_to_cpup(sbuf32++),
+			le32_to_cpup(sbuf32++),
+			le32_to_cpup(sbuf32++),
+			le32_to_cpup(sbuf32++),
+		};
+		/* write output bytes in reverse order for little endianness */
+		u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+			    ((u64)xfrm_pixel(pix[1]) << 16) |
+			    ((u64)xfrm_pixel(pix[2]) << 32) |
+			    ((u64)xfrm_pixel(pix[3]) << 48);
+		*dbuf64++ = cpu_to_le64(val64);
+	}
+#endif
+
+	/* write 2 pixels at once */
+	dbuf32 = (__le32 __force *)dbuf64;
+	while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+		u32 pix[2] = {
+			le32_to_cpup(sbuf32++),
+			le32_to_cpup(sbuf32++),
+		};
+		/* write output bytes in reverse order for little endianness */
+		u32 val32 = xfrm_pixel(pix[0]) |
+			   (xfrm_pixel(pix[1]) << 16);
+		*dbuf32++ = cpu_to_le32(val32);
+	}
+
+	/* write trailing pixel */
+	dbuf16 = (__le16 __force *)dbuf32;
 	while (sbuf32 < send32)
 		*dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
 }
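The new loop bounds use ALIGN_DOWN_PIXELS(), which is defined outside this hunk. Given how it is used above (send32 is sbuf32 + pixels, and the leftover pixels fall through to the narrower loops), a plausible definition, shown here purely as an assumption, subtracts the remainder of the pixel count modulo the batch size from the end pointer:

/*
 * Assumed definition, not part of this diff: the boundary before which
 * 'n' pixels can be processed in batches of 'm' (a power of two).
 * The trailing 'n % m' pixels are left for the narrower loops.
 */
#define ALIGN_DOWN_PIXELS(end, n, m) \
	((end) - ((n) & ((m) - 1)))

Under that assumption, with pixels = 7 the 64-bit loop stops at send32 - 3 after handling 4 pixels, the 32-bit loop handles 2 more, and the trailing loop writes the final pixel.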