mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	lib/percpu_counter.c: fix __percpu_counter_add()
__percpu_counter_add() may be called in a softirq/hardirq handler (for example, blk_mq_queue_exit() is typically called in a hardirq/softirq handler), so we need to call this_cpu_add() (an irq-safe helper) to update the percpu counter; otherwise counts may be lost. This fixes the problem that 'rmmod null_blk' hangs in blk_cleanup_queue() because of miscounting of request_queue->mq_usage_counter. This patch is the v1 of the previous one, "lib/percpu_counter.c: disable local irq when updating percpu counter", and takes Andrew's approach, which may be more efficient for ARCHs (x86, s390) that have an optimized this_cpu_add(). Signed-off-by: Ming Lei <tom.leiming@gmail.com> Cc: Paul Gortmaker <paul.gortmaker@windriver.com> Cc: Shaohua Li <shli@fusionio.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Fan Du <fan.du@windriver.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									5a610fcc73
								
							
						
					
					
						commit
						74e72f894d
					
				
					 1 changed file with 2 additions and 2 deletions
				
			
		| 
						 | 
				
			
			@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 | 
			
		|||
		unsigned long flags;
 | 
			
		||||
		raw_spin_lock_irqsave(&fbc->lock, flags);
 | 
			
		||||
		fbc->count += count;
 | 
			
		||||
		 __this_cpu_sub(*fbc->counters, count);
 | 
			
		||||
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 | 
			
		||||
		__this_cpu_write(*fbc->counters, 0);
 | 
			
		||||
	} else {
 | 
			
		||||
		__this_cpu_write(*fbc->counters, count);
 | 
			
		||||
		this_cpu_add(*fbc->counters, amount);
 | 
			
		||||
	}
 | 
			
		||||
	preempt_enable();
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue