mm: enable page allocation tagging

Redefine page allocators to record allocation tags upon their invocation.
Instrument post_alloc_hook and free_pages_prepare to modify the current
allocation tag.

[surenb@google.com: undo _noprof additions in the documentation]
Link: https://lkml.kernel.org/r/20240326231453.1206227-3-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-19-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8a2f118787
commit b951aaff50

7 changed files with 159 additions and 105 deletions
include/linux/alloc_tag.h
@@ -153,4 +153,18 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
+#define alloc_hooks_tag(_tag, _do_alloc)				\
+({									\
+	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
+	typeof(_do_alloc) _res = _do_alloc;				\
+	alloc_tag_restore(_tag, _old);					\
+	_res;								\
+})
+
+#define alloc_hooks(_do_alloc)						\
+({									\
+	DEFINE_ALLOC_TAG(_alloc_tag);					\
+	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
+})
+
 #endif /* _LINUX_ALLOC_TAG_H */
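
For illustration (an aside, not part of the patch): once the allocator entry
points are redefined in gfp.h below, a call such as alloc_pages(GFP_KERNEL, 0)
expands through alloc_hooks()/alloc_hooks_tag() roughly to

	({
		DEFINE_ALLOC_TAG(_alloc_tag);	/* static tag identifying this call site */
		struct alloc_tag * __maybe_unused _old = alloc_tag_save(&_alloc_tag);
		typeof(alloc_pages_noprof(GFP_KERNEL, 0)) _res =
			alloc_pages_noprof(GFP_KERNEL, 0);	/* the real allocation */
		alloc_tag_restore(&_alloc_tag, _old);	/* put the caller's tag back */
		_res;
	})

i.e. the call site's tag is made current for the duration of the allocation
and the previously active tag is restored afterwards.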
include/linux/gfp.h
@@ -6,6 +6,8 @@
 
 #include <linux/mmzone.h>
 #include <linux/topology.h>
+#include <linux/alloc_tag.h>
+#include <linux/sched.h>
 
 struct vm_area_struct;
 struct mempolicy;
@@ -175,42 +177,46 @@ static inline void arch_free_page(struct page *page, int order) { }
 static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
-		nodemask_t *nodemask);
-struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
+#define __alloc_pages(...)			alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))
 
-unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
+#define __folio_alloc(...)			alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))
+
+unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
 				struct list_head *page_list,
 				struct page **page_array);
+#define __alloc_pages_bulk(...)			alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
-unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 				unsigned long nr_pages,
 				struct page **page_array);
+#define  alloc_pages_bulk_array_mempolicy(...)				\
+	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-static inline unsigned long
-alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
-{
-	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
-}
+#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
+	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
 
-static inline unsigned long
-alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
-{
-	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
-}
+#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
+	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
 
 static inline unsigned long
-alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
+alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+				   struct page **page_array)
 {
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
+	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
 }
 
+#define alloc_pages_bulk_array_node(...)				\
+	alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
+
 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 {
 	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
@@ -230,76 +236,98 @@ static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
  * online. For more general interface, see alloc_pages_node().
  */
 static inline struct page *
-__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
+__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
 {
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	warn_if_node_offline(nid, gfp_mask);
 
-	return __alloc_pages(gfp_mask, order, nid, NULL);
+	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
 }
 
+#define  __alloc_pages_node(...)		alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
+
 static inline
-struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
 {
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	warn_if_node_offline(nid, gfp);
 
-	return __folio_alloc(gfp, order, nid, NULL);
+	return __folio_alloc_noprof(gfp, order, nid, NULL);
 }
 
+#define  __folio_alloc_node(...)		alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))
+
 /*
  * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
  * prefer the current CPU's closest node. Otherwise node must be valid and
  * online.
  */
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
+static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
 						   unsigned int order)
 {
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return __alloc_pages_node(nid, gfp_mask, order);
+	return __alloc_pages_node_noprof(nid, gfp_mask, order);
 }
 
+#define  alloc_pages_node(...)			alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
+
 #ifdef CONFIG_NUMA
-struct page *alloc_pages(gfp_t gfp, unsigned int order);
-struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
+struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid);
-struct folio *folio_alloc(gfp_t gfp, unsigned int order);
-struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage);
 #else
-static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
+static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
-	return alloc_pages_node(numa_node_id(), gfp_mask, order);
+	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
 }
-static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid)
 {
-	return alloc_pages(gfp, order);
+	return alloc_pages_noprof(gfp, order);
 }
-static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 {
 	return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define vma_alloc_folio(gfp, order, vma, addr, hugepage)		\
-	folio_alloc(gfp, order)
+#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage)		\
+	folio_alloc_noprof(gfp, order)
 #endif
 
+#define alloc_pages(...)			alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
+#define alloc_pages_mpol(...)			alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
+#define folio_alloc(...)			alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
+#define vma_alloc_folio(...)			alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
+
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-static inline struct page *alloc_page_vma(gfp_t gfp,
+
+static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);
 
 	return &folio->page;
 }
+#define alloc_page_vma(...)			alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
 
-extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
-extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
+#define __get_free_pages(...)			alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
 
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
+extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
+#define get_zeroed_page(...)			alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
+
+void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
+#define alloc_pages_exact(...)			alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))
+
 void free_pages_exact(void *virt, size_t size);
-__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
+
+__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
+#define alloc_pages_exact_nid(...)					\
+	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
 
 #define __get_free_page(gfp_mask)					\
 	__get_free_pages((gfp_mask), 0)
@@ -374,10 +402,14 @@ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
 
 #ifdef CONFIG_CONTIG_ALLOC
 /* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range(unsigned long start, unsigned long end,
+extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
 			      unsigned migratetype, gfp_t gfp_mask);
-extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+#define alloc_contig_range(...)			alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
 					      int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...)			alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
+
 #endif
 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
 
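A side note on the hunks above (an aside, not from the original patch text):
the former inline helpers alloc_pages_bulk_list() and alloc_pages_bulk_array()
become plain macros over __alloc_pages_bulk() so that the alloc_hooks()
expansion, and with it the per-call-site tag from DEFINE_ALLOC_TAG(), is
instantiated at the final caller rather than being shared by every user of a
helper defined inside gfp.h.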
include/linux/pagemap.h
@@ -542,14 +542,17 @@ static inline void *detach_page_private(struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
 #else
-static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 {
-	return folio_alloc(gfp, order);
+	return folio_alloc_noprof(gfp, order);
 }
 #endif
 
+#define filemap_alloc_folio(...)				\
+	alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))
+
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
 	return &filemap_alloc_folio(gfp, 0)->page;
mm/compaction.c
@@ -1851,7 +1851,7 @@ static void isolate_freepages(struct compact_control *cc)
  * This is a migrate-callback that "allocates" freepages by taking pages
  * from the isolated freelists in the block we are migrating to.
  */
-static struct folio *compaction_alloc(struct folio *src, unsigned long data)
+static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long data)
 {
 	struct compact_control *cc = (struct compact_control *)data;
 	struct folio *dst;
@@ -1898,6 +1898,11 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
 	return page_rmappable_folio(&dst->page);
 }
 
+static struct folio *compaction_alloc(struct folio *src, unsigned long data)
+{
+	return alloc_hooks(compaction_alloc_noprof(src, data));
+}
+
 /*
  * This is a migrate-callback that "frees" freepages back to the isolated
  * freelist.  All pages on the freelist are from the same zone, so there is no
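Another aside: compaction_alloc() keeps a real function wrapper around
alloc_hooks() rather than becoming a macro, because its address is passed to
migrate_pages() as the allocation callback, so the hook has to be expanded
once inside a callable function rather than at each call site.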
mm/filemap.c
@@ -966,7 +966,7 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 EXPORT_SYMBOL_GPL(filemap_add_folio);
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 {
 	int n;
 	struct folio *folio;
@@ -981,9 +981,9 @@ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
 
 		return folio;
 	}
-	return folio_alloc(gfp, order);
+	return folio_alloc_noprof(gfp, order);
 }
-EXPORT_SYMBOL(filemap_alloc_folio);
+EXPORT_SYMBOL(filemap_alloc_folio_noprof);
 #endif
 
 /*
mm/mempolicy.c
@@ -2201,9 +2201,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
 	 */
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-	page = __alloc_pages(preferred_gfp, order, nid, nodemask);
+	page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
 	if (!page)
-		page = __alloc_pages(gfp, order, nid, NULL);
+		page = __alloc_pages_noprof(gfp, order, nid, NULL);
 
 	return page;
 }
@@ -2218,7 +2218,7 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
  *
  * Return: The page on success or NULL if allocation fails.
  */
-struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *pol, pgoff_t ilx, int nid)
 {
 	nodemask_t *nodemask;
@@ -2249,7 +2249,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 			 * First, try to allocate THP only on local node, but
 			 * don't reclaim unnecessarily, just compact.
 			 */
-			page = __alloc_pages_node(nid,
+			page = __alloc_pages_node_noprof(nid,
 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
 			if (page || !(gfp & __GFP_DIRECT_RECLAIM))
 				return page;
@@ -2262,7 +2262,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 		}
 	}
 
-	page = __alloc_pages(gfp, order, nid, nodemask);
+	page = __alloc_pages_noprof(gfp, order, nid, nodemask);
 
 	if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
 		/* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
@@ -2293,7 +2293,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
  *
  * Return: The folio on success or NULL if allocation fails.
  */
-struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage)
 {
 	struct mempolicy *pol;
@@ -2301,12 +2301,12 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 	struct page *page;
 
 	pol = get_vma_policy(vma, addr, order, &ilx);
-	page = alloc_pages_mpol(gfp | __GFP_COMP, order,
+	page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
 				       pol, ilx, numa_node_id());
 	mpol_cond_put(pol);
 	return page_rmappable_folio(page);
 }
-EXPORT_SYMBOL(vma_alloc_folio);
+EXPORT_SYMBOL(vma_alloc_folio_noprof);
 
 /**
  * alloc_pages - Allocate pages.
@@ -2322,7 +2322,7 @@ EXPORT_SYMBOL(vma_alloc_folio);
  * flags are used.
  * Return: The page on success or NULL if allocation fails.
  */
-struct page *alloc_pages(gfp_t gfp, unsigned int order)
+struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
 {
 	struct mempolicy *pol = &default_policy;
 
@@ -2333,16 +2333,16 @@ struct page *alloc_pages(gfp_t gfp, unsigned int order)
 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
 		pol = get_task_policy(current);
 
-	return alloc_pages_mpol(gfp, order,
-				pol, NO_INTERLEAVE_INDEX, numa_node_id());
+	return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX,
+				       numa_node_id());
 }
-EXPORT_SYMBOL(alloc_pages);
+EXPORT_SYMBOL(alloc_pages_noprof);
 
-struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 {
-	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
+	return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order));
 }
-EXPORT_SYMBOL(folio_alloc);
+EXPORT_SYMBOL(folio_alloc_noprof);
 
 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 		struct mempolicy *pol, unsigned long nr_pages,
@@ -2361,13 +2361,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 
 	for (i = 0; i < nodes; i++) {
 		if (delta) {
-			nr_allocated = __alloc_pages_bulk(gfp,
+			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
 					nr_pages_per_node + 1, NULL,
 					page_array);
 			delta--;
 		} else {
-			nr_allocated = __alloc_pages_bulk(gfp,
+			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
 					nr_pages_per_node, NULL, page_array);
 		}
@@ -2504,11 +2504,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 
-	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
+	nr_allocated  = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
 					   nr_pages, NULL, page_array);
 
 	if (nr_allocated < nr_pages)
-		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
+		nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
 				nr_pages - nr_allocated, NULL,
 				page_array + nr_allocated);
 	return nr_allocated;
@@ -2520,7 +2520,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
  * It can accelerate memory allocation especially interleaving
  * allocate memory.
  */
-unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 		unsigned long nr_pages, struct page **page_array)
 {
 	struct mempolicy *pol = &default_policy;
@@ -2544,7 +2544,7 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
 
 	nid = numa_node_id();
 	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
-	return __alloc_pages_bulk(gfp, nid, nodemask,
+	return alloc_pages_bulk_noprof(gfp, nid, nodemask,
 				       nr_pages, NULL, page_array);
 }
 
mm/page_alloc.c
@@ -4391,7 +4391,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
  *
  * Returns the number of pages on the list or array.
  */
-unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			nodemask_t *nodemask, int nr_pages,
 			struct list_head *page_list,
 			struct page **page_array)
@@ -4527,7 +4527,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 failed:
-	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
+	page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
 	if (page) {
 		if (page_list)
 			list_add(&page->lru, page_list);
@@ -4538,13 +4538,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 
 	goto out;
 }
-EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
+EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
 
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask)
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
+				      int preferred_nid, nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4606,38 +4606,38 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages);
+EXPORT_SYMBOL(__alloc_pages_noprof);
 
-struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask)
 {
-	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
+	struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
 					preferred_nid, nodemask);
 	return page_rmappable_folio(page);
 }
-EXPORT_SYMBOL(__folio_alloc);
+EXPORT_SYMBOL(__folio_alloc_noprof);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
  * address cannot represent highmem pages. Use alloc_pages and then kmap if
  * you need to access high mem.
  */
-unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
+	page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
 	if (!page)
 		return 0;
 	return (unsigned long) page_address(page);
 }
-EXPORT_SYMBOL(__get_free_pages);
+EXPORT_SYMBOL(get_free_pages_noprof);
 
-unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
 {
-	return __get_free_page(gfp_mask | __GFP_ZERO);
+	return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
 }
-EXPORT_SYMBOL(get_zeroed_page);
+EXPORT_SYMBOL(get_zeroed_page_noprof);
 
 /**
  * __free_pages - Free pages allocated with alloc_pages().
@@ -4853,7 +4853,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
  *
  * Return: pointer to the allocated area or %NULL in case of error.
  */
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
+void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
 {
 	unsigned int order = get_order(size);
 	unsigned long addr;
@@ -4861,10 +4861,10 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
 
-	addr = __get_free_pages(gfp_mask, order);
+	addr = get_free_pages_noprof(gfp_mask, order);
 	return make_alloc_exact(addr, order, size);
 }
-EXPORT_SYMBOL(alloc_pages_exact);
+EXPORT_SYMBOL(alloc_pages_exact_noprof);
 
 /**
  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
@@ -4878,7 +4878,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
  *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
 {
 	unsigned int order = get_order(size);
 	struct page *p;
@@ -4886,7 +4886,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
 
-	p = alloc_pages_node(nid, gfp_mask, order);
+	p = alloc_pages_node_noprof(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
 	return make_alloc_exact((unsigned long)page_address(p), order, size);
@@ -6343,7 +6343,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
  * pages which PFN is in [start, end) are allocated for the caller and
  * need to be freed with free_contig_range().
  */
-int alloc_contig_range(unsigned long start, unsigned long end,
+int alloc_contig_range_noprof(unsigned long start, unsigned long end,
 		       unsigned migratetype, gfp_t gfp_mask)
 {
 	unsigned long outer_start, outer_end;
@@ -6467,14 +6467,14 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	undo_isolate_page_range(start, end, migratetype);
 	return ret;
 }
-EXPORT_SYMBOL(alloc_contig_range);
+EXPORT_SYMBOL(alloc_contig_range_noprof);
 
 static int __alloc_contig_pages(unsigned long start_pfn,
 				unsigned long nr_pages, gfp_t gfp_mask)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
 
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+	return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
 				   gfp_mask);
 }
 
@@ -6530,7 +6530,7 @@ static bool zone_spans_last_pfn(const struct zone *zone,
  *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
-struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
 				 int nid, nodemask_t *nodemask)
 {
 	unsigned long ret, pfn, flags;