mirror of https://github.com/torvalds/linux.git — synced 2025-11-04 02:30:34 +02:00
			
		
		
		
xarray: use kmem_cache_alloc_lru to allocate xa_node

The workingset code adds the xa_node to the shadow_nodes list, so the
allocation of xa_node should be done by kmem_cache_alloc_lru(). Use
xas_set_lru() to pass the list_lru into which we want to insert the
xa_node, so that the xa_node reclaim context is set up correctly.

Link: https://lkml.kernel.org/r/20220228122126.37293-9-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Alex Shi <alexs@kernel.org>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Kari Argillander <kari.argillander@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									f53bf711d4
								
							
						
					
					
						commit
						9bbdc0f324
					
				
					 4 changed files with 18 additions and 8 deletions
				
			
		| 
						 | 
				
			
			@ -334,9 +334,12 @@ void workingset_activation(struct folio *folio);
 | 
			
		|||
 | 
			
		||||
/* Only track the nodes of mappings with shadow entries */
 | 
			
		||||
void workingset_update_node(struct xa_node *node);
 | 
			
		||||
extern struct list_lru shadow_nodes;
 | 
			
		||||
#define mapping_set_update(xas, mapping) do {				\
 | 
			
		||||
	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
 | 
			
		||||
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
 | 
			
		||||
		xas_set_update(xas, workingset_update_node);		\
 | 
			
		||||
		xas_set_lru(xas, &shadow_nodes);			\
 | 
			
		||||
	}								\
 | 
			
		||||
} while (0)
 | 
			
		||||
 | 
			
		||||
/* linux/mm/page_alloc.c */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1317,6 +1317,7 @@ struct xa_state {
 | 
			
		|||
	struct xa_node *xa_node;
 | 
			
		||||
	struct xa_node *xa_alloc;
 | 
			
		||||
	xa_update_node_t xa_update;
 | 
			
		||||
	struct list_lru *xa_lru;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
| 
						 | 
				
			
			@ -1336,7 +1337,8 @@ struct xa_state {
 | 
			
		|||
	.xa_pad = 0,					\
 | 
			
		||||
	.xa_node = XAS_RESTART,				\
 | 
			
		||||
	.xa_alloc = NULL,				\
 | 
			
		||||
	.xa_update = NULL				\
 | 
			
		||||
	.xa_update = NULL,				\
 | 
			
		||||
	.xa_lru = NULL,					\
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			@ -1631,6 +1633,11 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
 | 
			
		|||
	xas->xa_update = update;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
 | 
			
		||||
{
 | 
			
		||||
	xas->xa_lru = lru;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * xas_next_entry() - Advance iterator to next present entry.
 | 
			
		||||
 * @xas: XArray operation state.
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										10
									
								
								lib/xarray.c
									
									
									
									
									
								
							
							
						
						
									
										10
									
								
								lib/xarray.c
									
									
									
									
									
								
							| 
						 | 
				
			
			@ -302,7 +302,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
 | 
			
		|||
	}
 | 
			
		||||
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 | 
			
		||||
		gfp |= __GFP_ACCOUNT;
 | 
			
		||||
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
 | 
			
		||||
	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 | 
			
		||||
	if (!xas->xa_alloc)
 | 
			
		||||
		return false;
 | 
			
		||||
	xas->xa_alloc->parent = NULL;
 | 
			
		||||
| 
						 | 
				
			
			@ -334,10 +334,10 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
 | 
			
		|||
		gfp |= __GFP_ACCOUNT;
 | 
			
		||||
	if (gfpflags_allow_blocking(gfp)) {
 | 
			
		||||
		xas_unlock_type(xas, lock_type);
 | 
			
		||||
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
 | 
			
		||||
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 | 
			
		||||
		xas_lock_type(xas, lock_type);
 | 
			
		||||
	} else {
 | 
			
		||||
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
 | 
			
		||||
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 | 
			
		||||
	}
 | 
			
		||||
	if (!xas->xa_alloc)
 | 
			
		||||
		return false;
 | 
			
		||||
| 
						 | 
				
			
			@ -371,7 +371,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
 | 
			
		|||
		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 | 
			
		||||
			gfp |= __GFP_ACCOUNT;
 | 
			
		||||
 | 
			
		||||
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
 | 
			
		||||
		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 | 
			
		||||
		if (!node) {
 | 
			
		||||
			xas_set_err(xas, -ENOMEM);
 | 
			
		||||
			return NULL;
 | 
			
		||||
| 
						 | 
				
			
			@ -1014,7 +1014,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 | 
			
		|||
		void *sibling = NULL;
 | 
			
		||||
		struct xa_node *node;
 | 
			
		||||
 | 
			
		||||
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
 | 
			
		||||
		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 | 
			
		||||
		if (!node)
 | 
			
		||||
			goto nomem;
 | 
			
		||||
		node->array = xas->xa;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -429,7 +429,7 @@ void workingset_activation(struct folio *folio)
 | 
			
		|||
 * point where they would still be useful.
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
static struct list_lru shadow_nodes;
 | 
			
		||||
struct list_lru shadow_nodes;
 | 
			
		||||
 | 
			
		||||
void workingset_update_node(struct xa_node *node)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue