mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	mm/list_lru: introduce list_lru_shrink_walk_irq()
Provide list_lru_shrink_walk_irq() and let it behave like list_lru_walk_one() except that it locks the spinlock with spin_lock_irq(). This is used by scan_shadow_nodes() because its lock nests within the i_pages lock which is acquired with IRQ. This change allows using proper locking primitives instead of hand-crafted local_irq_disable() plus spin_lock(). There is no EXPORT_SYMBOL provided because the current user is in-kernel only. Add list_lru_shrink_walk_irq() which acquires the spinlock with the proper locking primitives. Link: http://lkml.kernel.org/r/20180716111921.5365-5-bigeasy@linutronix.de Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reviewed-by: Vladimir Davydov <vdavydov.dev@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									6e018968f8
								
							
						
					
					
						commit
						6b51e88199
					
				
					 3 changed files with 42 additions and 6 deletions
				
			
		| 
						 | 
					@ -166,6 +166,23 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
 | 
				
			||||||
				int nid, struct mem_cgroup *memcg,
 | 
									int nid, struct mem_cgroup *memcg,
 | 
				
			||||||
				list_lru_walk_cb isolate, void *cb_arg,
 | 
									list_lru_walk_cb isolate, void *cb_arg,
 | 
				
			||||||
				unsigned long *nr_to_walk);
 | 
									unsigned long *nr_to_walk);
 | 
				
			||||||
 | 
					/**
 | 
				
			||||||
 | 
					 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 | 
				
			||||||
 | 
					 * @lru: the lru pointer.
 | 
				
			||||||
 | 
					 * @nid: the node id to scan from.
 | 
				
			||||||
 | 
					 * @memcg: the cgroup to scan from.
 | 
				
			||||||
 | 
					 * @isolate: callback function that is resposible for deciding what to do with
 | 
				
			||||||
 | 
					 *  the item currently being scanned
 | 
				
			||||||
 | 
					 * @cb_arg: opaque type that will be passed to @isolate
 | 
				
			||||||
 | 
					 * @nr_to_walk: how many items to scan.
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * Same as @list_lru_walk_one except that the spinlock is acquired with
 | 
				
			||||||
 | 
					 * spin_lock_irq().
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					unsigned long list_lru_walk_one_irq(struct list_lru *lru,
 | 
				
			||||||
 | 
									    int nid, struct mem_cgroup *memcg,
 | 
				
			||||||
 | 
									    list_lru_walk_cb isolate, void *cb_arg,
 | 
				
			||||||
 | 
									    unsigned long *nr_to_walk);
 | 
				
			||||||
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 | 
					unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 | 
				
			||||||
				 list_lru_walk_cb isolate, void *cb_arg,
 | 
									 list_lru_walk_cb isolate, void *cb_arg,
 | 
				
			||||||
				 unsigned long *nr_to_walk);
 | 
									 unsigned long *nr_to_walk);
 | 
				
			||||||
| 
						 | 
					@ -178,6 +195,14 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
 | 
				
			||||||
				 &sc->nr_to_scan);
 | 
									 &sc->nr_to_scan);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static inline unsigned long
 | 
				
			||||||
 | 
					list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
 | 
				
			||||||
 | 
								 list_lru_walk_cb isolate, void *cb_arg)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
 | 
				
			||||||
 | 
									     &sc->nr_to_scan);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline unsigned long
 | 
					static inline unsigned long
 | 
				
			||||||
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 | 
					list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 | 
				
			||||||
	      void *cb_arg, unsigned long nr_to_walk)
 | 
						      void *cb_arg, unsigned long nr_to_walk)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -282,6 +282,21 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
EXPORT_SYMBOL_GPL(list_lru_walk_one);
 | 
					EXPORT_SYMBOL_GPL(list_lru_walk_one);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					unsigned long
 | 
				
			||||||
 | 
					list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 | 
				
			||||||
 | 
							      list_lru_walk_cb isolate, void *cb_arg,
 | 
				
			||||||
 | 
							      unsigned long *nr_to_walk)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct list_lru_node *nlru = &lru->node[nid];
 | 
				
			||||||
 | 
						unsigned long ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						spin_lock_irq(&nlru->lock);
 | 
				
			||||||
 | 
						ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
 | 
				
			||||||
 | 
									  nr_to_walk);
 | 
				
			||||||
 | 
						spin_unlock_irq(&nlru->lock);
 | 
				
			||||||
 | 
						return ret;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 | 
					unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 | 
				
			||||||
				 list_lru_walk_cb isolate, void *cb_arg,
 | 
									 list_lru_walk_cb isolate, void *cb_arg,
 | 
				
			||||||
				 unsigned long *nr_to_walk)
 | 
									 unsigned long *nr_to_walk)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -483,13 +483,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 | 
				
			||||||
static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
 | 
					static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
 | 
				
			||||||
				       struct shrink_control *sc)
 | 
									       struct shrink_control *sc)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	unsigned long ret;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
 | 
						/* list_lru lock nests inside the IRQ-safe i_pages lock */
 | 
				
			||||||
	local_irq_disable();
 | 
						return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
 | 
				
			||||||
	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
 | 
										NULL);
 | 
				
			||||||
	local_irq_enable();
 | 
					 | 
				
			||||||
	return ret;
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static struct shrinker workingset_shadow_shrinker = {
 | 
					static struct shrinker workingset_shadow_shrinker = {
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue