commit 3b0ba54d5f
			A number of allocation helper functions were converted into macros to
account them at the call sites.  Add a comment for each converted
allocation helper explaining why it has to be a macro and why we typecast
the return value wherever required.  The patch also moves
acpi_os_acquire_object() closer to other allocation helpers to group them
together under the same comment.  The patch has no functional changes.
Link: https://lkml.kernel.org/r/20240703174225.3891393-1-surenb@google.com
Fixes: 2c321f3f70 ("mm: change inlined allocation helpers to account at the call site")
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Christian König <christian.koenig@amd.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@suse.cz>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Thorsten Blum <thorsten.blum@toblux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
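
Why the helpers must be macros, as a minimal sketch (the my_alloc* wrappers below are hypothetical and not part of this patch): with memory allocation profiling enabled, kmalloc() records an alloc_tag at the source location of the kmalloc() call itself, so an inline wrapper charges every caller to the single line inside the wrapper, while a macro expands kmalloc() into each caller and gives every call site its own counter.

/* Hypothetical wrappers for illustration only. */

/* Inline function: kmalloc() appears at one source location, so
 * allocations from every caller are accounted to this single line. */
static inline void *my_alloc_fn(size_t size)
{
	return kmalloc(size, GFP_KERNEL);
}

/* Macro: kmalloc() is expanded at each call site, so each caller
 * gets its own alloc_tag (and its own line in /proc/allocinfo). */
#define my_alloc(_size) \
	kmalloc(_size, GFP_KERNEL)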
aclinuxex.h · 140 lines · 3.3 KiB · C
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
 *
 * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#ifndef __ACLINUXEX_H__
#define __ACLINUXEX_H__

#ifdef __KERNEL__

#ifndef ACPI_USE_NATIVE_DIVIDE

#ifndef ACPI_DIV_64_BY_32
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	do { \
		u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
		(r32) = do_div ((__n), (d32)); \
		(q32) = (u32) (__n); \
	} while (0)
#endif

#ifndef ACPI_SHIFT_RIGHT_64
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	do { \
		(n_lo) >>= 1; \
		(n_lo) |= (((n_hi) & 1) << 31); \
		(n_hi) >>= 1; \
	} while (0)
#endif

#endif

/*
 * Overrides for in-kernel ACPICA
 */
acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);

acpi_status acpi_os_terminate(void);

/*
 * The irqs_disabled() check is for resume from RAM.
 * Interrupts are off during resume, just like they are for boot.
 * However, boot has (system_state != SYSTEM_RUNNING)
 * to quiet __might_sleep() in kmalloc() and resume does not.
 *
 * These specialized allocators have to be macros for their allocations to be
 * accounted separately (to have separate alloc_tag).
 */
#define acpi_os_allocate(_size)	\
		kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)

#define acpi_os_allocate_zeroed(_size)	\
		kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)

#define acpi_os_acquire_object(_cache)	\
		kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)

static inline void acpi_os_free(void *memory)
{
	kfree(memory);
}

static inline acpi_thread_id acpi_os_get_thread_id(void)
{
	return (acpi_thread_id) (unsigned long)current;
}

/*
 * When lockdep is enabled, the spin_lock_init() macro stringifies its
 * argument and uses that as a name for the lock in debugging.
 * By executing spin_lock_init() in a macro, the key changes from "lock" for
 * all locks to the name of the argument of acpi_os_create_lock(), which
 * prevents lockdep from reporting false positives for ACPICA locks.
 */
#define acpi_os_create_lock(__handle) \
	({ \
		spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})

#define acpi_os_create_raw_lock(__handle) \
	({ \
		raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			raw_spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})

static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
{
	acpi_cpu_flags flags;

	raw_spin_lock_irqsave(lockp, flags);
	return flags;
}

static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
					    acpi_cpu_flags flags)
{
	raw_spin_unlock_irqrestore(lockp, flags);
}

static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
{
	ACPI_FREE(handle);
}

static inline u8 acpi_os_readable(void *pointer, acpi_size length)
{
	return TRUE;
}

static inline acpi_status acpi_os_initialize_debugger(void)
{
	return AE_OK;
}

static inline void acpi_os_terminate_debugger(void)
{
	return;
}

/*
 * OSL interfaces added by Linux
 */

#endif				/* __KERNEL__ */

#endif				/* __ACLINUXEX_H__ */
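
Two hedged usage sketches for the patterns above; every caller-side name below is invented for illustration and does not appear in this header.

acpi_os_create_lock(): the ({ ... }) statement expression both allocates the spinlock and yields an acpi_status. Because the __handle argument is substituted before spin_lock_init() stringifies it, the lockdep class name reflects the caller's variable rather than a shared "lock" key:

	acpi_spinlock my_context_lock;	/* hypothetical caller variable */
	acpi_status status;

	status = acpi_os_create_lock(&my_context_lock);
	if (ACPI_FAILURE(status))
		return status;	/* AE_NO_MEMORY when ACPI_ALLOCATE() fails */

ACPI_DIV_64_BY_32(): on configurations without ACPI_USE_NATIVE_DIVIDE, the dividend is passed as separate high and low 32-bit halves and the division is deferred to do_div():

	u32 hi = 0x00000002, lo = 0x00000000;	/* dividend = 0x200000000 */
	u32 quotient, remainder;

	/* quotient = 0x20000000, remainder = 0 */
	ACPI_DIV_64_BY_32(hi, lo, 16, quotient, remainder);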