Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00
	xfs: bound maximum wait time for inodegc work
Currently inodegc work can sit queued on the per-cpu queue until the workqueue is either flushed or the queue reaches a depth that triggers work queuing (and later throttling). This means that we could queue work that waits for a long time for some other event to trigger flushing.

Hence instead of just queueing work at a specific depth, use a delayed work that queues the work at a bound time. We can still schedule the work immediately at a given depth, but we no longer need to worry about leaving a number of items on the list that won't get processed until external events prevail.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
This commit is contained in:

parent e89ab76d7e
commit 7cf2b0f961

3 changed files with 24 additions and 16 deletions
			
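To make the change easier to follow before reading the diff, here is a minimal, self-contained sketch of the pattern the patch switches to: replacing a plain work_struct with a delayed_work so that queued items are processed within a bounded time, while a deep queue still triggers immediate processing. The demo_gc/demo_queue/demo_worker names and the DEMO_BATCH/DEMO_DELAY_MS thresholds are hypothetical stand-ins, not XFS identifiers; the patch itself operates on the per-cpu struct xfs_inodegc and uses a one-jiffy default delay.

/*
 * Illustrative sketch only -- not XFS code. The demo_* names and
 * DEMO_* constants are hypothetical. A single-threaded caller is
 * assumed for simplicity; the real code keeps per-cpu state.
 */
#include <linux/workqueue.h>
#include <linux/llist.h>
#include <linux/jiffies.h>

#define DEMO_BATCH	32	/* queue depth at which work should run immediately */
#define DEMO_DELAY_MS	100	/* upper bound on how long a queued item should wait */

struct demo_gc {
	struct llist_head	list;
	struct delayed_work	work;	/* was a plain struct work_struct before */
	unsigned int		items;
};

static void demo_worker(struct work_struct *work)
{
	/* to_delayed_work() maps the embedded work_struct back to the delayed_work */
	struct demo_gc		*gc = container_of(to_delayed_work(work),
						   struct demo_gc, work);
	struct llist_node	*node = llist_del_all(&gc->list);

	gc->items = 0;
	/* ... process every item on 'node' here ... */
}

static void demo_init(struct demo_gc *gc)
{
	init_llist_head(&gc->list);
	gc->items = 0;
	INIT_DELAYED_WORK(&gc->work, demo_worker);
}

static void demo_queue(struct demo_gc *gc, struct llist_node *item)
{
	unsigned long	delay = msecs_to_jiffies(DEMO_DELAY_MS);

	llist_add(item, &gc->list);

	/* Deep queue: run the work right away instead of waiting for the timer. */
	if (++gc->items >= DEMO_BATCH)
		delay = 0;

	/*
	 * mod_delayed_work() queues idle work with 'delay' or re-arms the
	 * pending timer, so a lightly used queue is still processed soon
	 * rather than waiting for some external flush to come along.
	 */
	mod_delayed_work(system_wq, &gc->work, delay);
}

Because mod_delayed_work() handles both the idle and the already-queued cases, a single call site covers "run now" (deep queue) and "run within the bound", which is why the patch below can drop the separate queue_work() call at queue time and simply vary the delay.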
fs/xfs/xfs_icache.c

@@ -440,7 +440,7 @@ xfs_inodegc_queue_all(
 	for_each_online_cpu(cpu) {
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 		if (!llist_empty(&gc->list))
-			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
 	}
 }
 
@@ -1841,8 +1841,8 @@ void
 xfs_inodegc_worker(
 	struct work_struct	*work)
 {
-	struct xfs_inodegc	*gc = container_of(work, struct xfs_inodegc,
-							work);
+	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
+						struct xfs_inodegc, work);
 	struct llist_node	*node = llist_del_all(&gc->list);
 	struct xfs_inode	*ip, *n;
 
@@ -2014,6 +2014,7 @@ xfs_inodegc_queue(
 	struct xfs_inodegc	*gc;
 	int			items;
 	unsigned int		shrinker_hits;
+	unsigned long		queue_delay = 1;
 
 	trace_xfs_inode_set_need_inactive(ip);
 	spin_lock(&ip->i_flags_lock);
@@ -2025,19 +2026,26 @@ xfs_inodegc_queue(
 	items = READ_ONCE(gc->items);
 	WRITE_ONCE(gc->items, items + 1);
 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
-	put_cpu_ptr(gc);
 
-	if (!xfs_is_inodegc_enabled(mp))
+	/*
+	 * We queue the work while holding the current CPU so that the work
+	 * is scheduled to run on this CPU.
+	 */
+	if (!xfs_is_inodegc_enabled(mp)) {
+		put_cpu_ptr(gc);
 		return;
+	}
 
-	if (xfs_inodegc_want_queue_work(ip, items)) {
-		trace_xfs_inodegc_queue(mp, __return_address);
-		queue_work(mp->m_inodegc_wq, &gc->work);
-	}
+	if (xfs_inodegc_want_queue_work(ip, items))
+		queue_delay = 0;
+
+	trace_xfs_inodegc_queue(mp, __return_address);
+	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
+	put_cpu_ptr(gc);
 
 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
 		trace_xfs_inodegc_throttle(mp, __return_address);
-		flush_work(&gc->work);
+		flush_delayed_work(&gc->work);
 	}
 }
 
@@ -2054,7 +2062,7 @@ xfs_inodegc_cpu_dead(
 	unsigned int		count = 0;
 
 	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
-	cancel_work_sync(&dead_gc->work);
+	cancel_delayed_work_sync(&dead_gc->work);
 
 	if (llist_empty(&dead_gc->list))
 		return;
@@ -2073,12 +2081,12 @@ xfs_inodegc_cpu_dead(
 	llist_add_batch(first, last, &gc->list);
 	count += READ_ONCE(gc->items);
 	WRITE_ONCE(gc->items, count);
-	put_cpu_ptr(gc);
 
 	if (xfs_is_inodegc_enabled(mp)) {
 		trace_xfs_inodegc_queue(mp, __return_address);
-		queue_work(mp->m_inodegc_wq, &gc->work);
+		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
 	}
+	put_cpu_ptr(gc);
 }
 
 /*
@@ -2173,7 +2181,7 @@ xfs_inodegc_shrinker_scan(
 			unsigned int	h = READ_ONCE(gc->shrinker_hits);
 
 			WRITE_ONCE(gc->shrinker_hits, h + 1);
-			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
 			no_items = false;
 		}
 	}
fs/xfs/xfs_mount.h

@@ -61,7 +61,7 @@ struct xfs_error_cfg {
  */
 struct xfs_inodegc {
 	struct llist_head	list;
-	struct work_struct	work;
+	struct delayed_work	work;
 
 	/* approximate count of inodes in the list */
 	unsigned int		items;
fs/xfs/xfs_super.c

@@ -1074,7 +1074,7 @@ xfs_inodegc_init_percpu(
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 		init_llist_head(&gc->list);
 		gc->items = 0;
-		INIT_WORK(&gc->work, xfs_inodegc_worker);
+		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
 	}
 	return 0;
 }