percpu_counter: make percpu_counters_lock irq-safe
percpu_counter is scheduled to grow @gfp support to allow atomic
initialization. This patch makes percpu_counters_lock irq-safe so
that it can be safely used from atomic contexts.

Signed-off-by: Tejun Heo <tj@kernel.org>
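To make the motivation concrete: below is a minimal sketch of the
self-deadlock that irq-unsafe locking invites once a lock can also be
taken from atomic context. It is illustrative only, not part of the
patch; demo_lock and the demo_* functions are hypothetical stand-ins
for percpu_counters_lock and its users.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process context, before the patch: local interrupts stay enabled. */
static void demo_unsafe(void)
{
	spin_lock(&demo_lock);
	/*
	 * If an interrupt fires on this CPU right here and its handler
	 * also takes demo_lock, the handler spins forever: the lock
	 * holder cannot run again on this CPU until the handler returns.
	 */
	spin_unlock(&demo_lock);
}

/* Process context, after the patch: local interrupts are disabled. */
static void demo_safe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* No handler can take demo_lock on this CPU while we hold it. */
	spin_unlock_irqrestore(&demo_lock, flags);
}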
parent 1a4d76076c
commit ebd8fef304

1 changed file with 10 additions and 6 deletions
lib/percpu_counter.c
@@ -115,6 +115,8 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
+	unsigned long flags __maybe_unused;
+
 	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 
 #ifdef CONFIG_HOTPLUG_CPU
 	INIT_LIST_HEAD(&fbc->list);
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_add(&fbc->list, &percpu_counters);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	return 0;
 }
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
+	unsigned long flags __maybe_unused;
+
 	if (!fbc->counters)
 		return;
 
 	debug_percpu_counter_deactivate(fbc);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_del(&fbc->list);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	free_percpu(fbc->counters);
 	fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
 		unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		*pcount = 0;
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irq(&percpu_counters_lock);
 #endif
 	return NOTIFY_OK;
 }
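Two details of the conversion are worth noting. First,
percpu_counter_hotcpu_callback() runs in process context with
interrupts known to be enabled, so it can use the plain
spin_lock_irq()/spin_unlock_irq() pair rather than saving and
restoring flags. Second, the new flags variables in
__percpu_counter_init() and percpu_counter_destroy() are referenced
only under #ifdef CONFIG_HOTPLUG_CPU, hence the __maybe_unused
annotation to keep !CONFIG_HOTPLUG_CPU builds free of unused-variable
warnings. A sketch of that shape (hypothetical demo_counter type and
demo_destroy() function, reusing the assumed demo_lock from above):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_counter {
	struct list_head list;
};

static void demo_destroy(struct demo_counter *d)
{
	unsigned long flags __maybe_unused;	/* only used under the ifdef */

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&demo_lock, flags);
	list_del(&d->list);
	spin_unlock_irqrestore(&demo_lock, flags);
#endif
}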