mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	Make the architecture-optimized CRC code do its CPU feature checks in subsys_initcalls instead of arch_initcalls. This makes it consistent with arch/*/lib/crypto/ and ensures that it runs after initcalls that possibly could be a prerequisite for kernel-mode FPU, such as x86's xfd_update_static_branch() and loongarch's init_euen_mask(). Note: as far as I can tell, x86's xfd_update_static_branch() isn't *actually* needed for kernel-mode FPU. loongarch's init_euen_mask() is needed to enable save/restore of the vector registers, but loongarch doesn't yet have any CRC or crypto code that uses vector registers anyway. Regardless, let's be consistent with arch/*/lib/crypto/ and robust against any potential future dependency on an arch_initcall. Link: https://lore.kernel.org/r/20250510035959.87995-1-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@google.com>
		
			
				
	
	
		
			50 lines
		
	
	
	
		
			1.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			50 lines
		
	
	
	
		
			1.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CRC64 using [V]PCLMULQDQ instructions
 *
 * Copyright 2025 Google LLC
 */

#include <linux/crc64.h>
#include <linux/module.h>
#include "crc-pclmul-template.h"

/*
 * Static-key gate for the PCLMULQDQ-accelerated code paths.  It is enabled
 * once at boot by crc64_x86_init() when the CPU supports PCLMULQDQ and is
 * never changed afterwards, hence __ro_after_init.
 */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);

/*
 * Declare the PCLMULQDQ CRC64 implementations generated from
 * crc-pclmul-template.h: one for the MSB-first (big-endian) bit order and
 * one for the LSB-first (bit-reflected) order.
 * NOTE(review): the exact expansion (function variants, consts symbols)
 * comes from the template header — confirm there.
 */
DECLARE_CRC_PCLMUL_FUNCS(crc64_msb, u64);
DECLARE_CRC_PCLMUL_FUNCS(crc64_lsb, u64);
 | 
						|
 | 
						|
/*
 * crc64_be_arch() - big-endian (MSB-first) CRC64 over @len bytes at @p,
 * polynomial 0x42f0e1eba9ea3693, starting from @crc.
 *
 * CRC_PCLMUL() (crc-pclmul-template.h) tests the have_pclmulqdq static key
 * and, when enabled, computes the CRC with the [V]PCLMULQDQ implementation
 * and returns directly from this function; otherwise execution falls
 * through to the generic fallback below.
 * NOTE(review): the in-macro return is implied by the unconditional
 * fallback call that follows — confirm against the template header.
 */
u64 crc64_be_arch(u64 crc, const u8 *p, size_t len)
{
	CRC_PCLMUL(crc, p, len, crc64_msb, crc64_msb_0x42f0e1eba9ea3693_consts,
		   have_pclmulqdq);
	return crc64_be_generic(crc, p, len);
}
EXPORT_SYMBOL_GPL(crc64_be_arch);
 | 
						|
 | 
						|
/*
 * crc64_nvme_arch() - LSB-first (bit-reflected) CRC64 over @len bytes at
 * @p, using the NVMe CRC64 constants (0x9a6c9329ac4bc9b5), starting from
 * @crc.
 *
 * Same dispatch pattern as crc64_be_arch(): CRC_PCLMUL() returns the
 * accelerated result when the have_pclmulqdq static key is enabled,
 * otherwise we fall through to the generic implementation.
 */
u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
{
	CRC_PCLMUL(crc, p, len, crc64_lsb, crc64_lsb_0x9a6c9329ac4bc9b5_consts,
		   have_pclmulqdq);
	return crc64_nvme_generic(crc, p, len);
}
EXPORT_SYMBOL_GPL(crc64_nvme_arch);
 | 
						|
 | 
						|
static int __init crc64_x86_init(void)
 | 
						|
{
 | 
						|
	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
 | 
						|
		static_branch_enable(&have_pclmulqdq);
 | 
						|
		INIT_CRC_PCLMUL(crc64_msb);
 | 
						|
		INIT_CRC_PCLMUL(crc64_lsb);
 | 
						|
	}
 | 
						|
	return 0;
 | 
						|
}
 | 
						|
subsys_initcall(crc64_x86_init);
 | 
						|
 | 
						|
/*
 * Module exit hook.  Intentionally empty: the static key and the function
 * selections made at init need no teardown.  Providing module_exit()
 * keeps the module unloadable when built as a module.
 */
static void __exit crc64_x86_exit(void)
{
}
module_exit(crc64_x86_exit);
 | 
						|
 | 
						|
/* Module metadata. */
MODULE_DESCRIPTION("CRC64 using [V]PCLMULQDQ instructions");
MODULE_LICENSE("GPL");
 |