forked from mirrors/linux

Commit 0c3ce2f502
Currently there is one 'struct page_frag' for every 'struct sock' and
'struct task_struct'; we are about to replace 'struct page_frag' with
'struct page_frag_cache' for them. Before beginning the replacement, we
need to ensure that 'struct page_frag_cache' is no bigger than
'struct page_frag', as there may be tens of thousands of 'struct sock'
and 'struct task_struct' instances in the system.

By OR'ing the page order and the pfmemalloc flag into the lower bits of
'va', instead of using a 'u16' or 'u32' for the page size and a 'u8' for
pfmemalloc, we avoid wasting 3 or 5 bytes per instance. And since the
page address, pfmemalloc flag and order are unchanged for the same page
within the same 'page_frag_cache' instance, it makes sense to pack them
together.

After this patch, the size of 'struct page_frag_cache' should be the
same as the size of 'struct page_frag'.

CC: Andrew Morton <akpm@linux-foundation.org>
CC: Linux-MM <linux-mm@kvack.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Link: https://patch.msgid.link/20241028115343.3405838-7-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
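The packing works because 'va' always points at the start of a page, so its low PAGE_SHIFT bits are guaranteed to be zero; that leaves room for the order in the low byte and the pfmemalloc flag in the next bit. Below is a minimal user-space sketch of that scheme, assuming 4K pages; the DEMO_* macros and the encode/decode helper names are illustrative stand-ins for the PAGE_FRAG_CACHE_* masks defined in the header further down, not kernel API.

/* Illustrative user-space model of the 'encoded_page' packing.
 * All names and the page-shift value here are assumptions for
 * demonstration only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT		12			/* assume 4K pages */
#define DEMO_ORDER_MASK		0xffUL			/* low byte carries the order */
#define DEMO_PFMEMALLOC_BIT	(DEMO_ORDER_MASK + 1)	/* next bit carries pfmemalloc */

static unsigned long encode_page(unsigned long va, unsigned int order, bool pfmemalloc)
{
	/* va is page aligned, so its low PAGE_SHIFT bits are free to reuse */
	assert((va & ((1UL << DEMO_PAGE_SHIFT) - 1)) == 0);
	return va | order | (pfmemalloc ? DEMO_PFMEMALLOC_BIT : 0);
}

static unsigned long decode_va(unsigned long encoded)
{
	/* mask off everything below the page boundary to recover the address */
	return encoded & ~((1UL << DEMO_PAGE_SHIFT) - 1);
}

static unsigned int decode_order(unsigned long encoded)
{
	return encoded & DEMO_ORDER_MASK;
}

static bool decode_pfmemalloc(unsigned long encoded)
{
	return !!(encoded & DEMO_PFMEMALLOC_BIT);
}

int main(void)
{
	unsigned long enc = encode_page(0x7f0000042000UL, 3, true);

	printf("va=%#lx order=%u pfmemalloc=%d\n",
	       decode_va(enc), decode_order(enc), decode_pfmemalloc(enc));
	return 0;
}

The decode side of the real header mirrors this: encoded_page_decode_pfmemalloc() below tests the pfmemalloc bit directly on the single encoded word.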
61 lines · 1.7 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_PAGE_FRAG_CACHE_H
#define _LINUX_PAGE_FRAG_CACHE_H

#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/mm_types_task.h>
#include <linux/types.h>

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
/* Use a full byte here to enable assembler optimization as the shift
 * operation is usually expecting a byte.
 */
#define PAGE_FRAG_CACHE_ORDER_MASK		GENMASK(7, 0)
#else
/* Compiler should be able to figure out we don't read things as any value
 * ANDed with 0 is 0.
 */
#define PAGE_FRAG_CACHE_ORDER_MASK		0
#endif

#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT		(PAGE_FRAG_CACHE_ORDER_MASK + 1)

static inline bool encoded_page_decode_pfmemalloc(unsigned long encoded_page)
{
	return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
}

static inline void page_frag_cache_init(struct page_frag_cache *nc)
{
	nc->encoded_page = 0;
}

static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
{
	return encoded_page_decode_pfmemalloc(nc->encoded_page);
}

void page_frag_cache_drain(struct page_frag_cache *nc);
void __page_frag_cache_drain(struct page *page, unsigned int count);
void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
			      gfp_t gfp_mask, unsigned int align_mask);

static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
					  unsigned int fragsz, gfp_t gfp_mask,
					  unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
}

static inline void *page_frag_alloc(struct page_frag_cache *nc,
				    unsigned int fragsz, gfp_t gfp_mask)
{
	return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}

void page_frag_free(void *addr);

#endif
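For orientation, here is a hedged sketch of how a consumer of this header might drive the API from kernel code. The rx_buf_pool structure, the GFP_ATOMIC flag choice and the SMP_CACHE_BYTES alignment are illustrative assumptions, not taken from this patch.

/* Illustrative kernel-side usage sketch; the rx_buf_pool wrapper and the
 * chosen flags are assumptions, not part of this header.
 */
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/page_frag_cache.h>

struct rx_buf_pool {
	struct page_frag_cache frag_cache;
};

static void rx_buf_pool_init(struct rx_buf_pool *pool)
{
	/* Start with an empty cache; the first allocation refills it. */
	page_frag_cache_init(&pool->frag_cache);
}

static void *rx_buf_alloc(struct rx_buf_pool *pool, unsigned int len)
{
	/* Carve a cache-line-aligned fragment out of the cached page. */
	return page_frag_alloc_align(&pool->frag_cache, len, GFP_ATOMIC,
				     SMP_CACHE_BYTES);
}

static void rx_buf_free(void *buf)
{
	/* Drop the fragment's reference on its backing page. */
	page_frag_free(buf);
}

static void rx_buf_pool_destroy(struct rx_buf_pool *pool)
{
	/* Release the page still held by the cache, if any. */
	page_frag_cache_drain(&pool->frag_cache);
}

Each fragment holds its own reference on the backing page, so fragments may be freed in any order and may outlive the cache they were carved from; only the final page_frag_cache_drain() on teardown touches the cache state itself.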