mirror of https://github.com/torvalds/linux.git (synced 2025-11-03 18:20:25 +02:00)

	ila: make lockdep happy again
Previously, alloc_ila_locks() and bucket_table_alloc() called spin_lock_init()
separately, so they ended up with two different lock names and lock class keys.
However, after commit b893281715 ("ila: Call library function
alloc_bucket_locks") they both call the helper alloc_bucket_spinlocks(), which
has only one lock name and one lock class key. This causes a few bogus lockdep
warnings, as reported by syzbot.

Fix this by making alloc_bucket_locks() a macro and passing the declaration
name as the lock name, together with a static lock class key defined inside
the macro.

Fixes: b893281715 ("ila: Call library function alloc_bucket_locks")
Reported-by: syzbot+b66a5a554991a8ed027c@syzkaller.appspotmail.com
Cc: Tom Herbert <tom@quantonium.net>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 32039eac4c
commit ff93bca769

2 changed files with 21 additions and 7 deletions
include/linux/spinlock.h

@@ -451,9 +451,20 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
 		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
 
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
-			   size_t max_size, unsigned int cpu_mult,
-			   gfp_t gfp);
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+			     size_t max_size, unsigned int cpu_mult,
+			     gfp_t gfp, const char *name,
+			     struct lock_class_key *key);
+
+#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
+	({								      \
+		static struct lock_class_key key;			      \
+		int ret;						      \
+									      \
+		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,    \
+					       cpu_mult, gfp, #locks, &key);  \
+		ret;							      \
+	})
 
 void free_bucket_spinlocks(spinlock_t *locks);

lib/bucket_locks.c

@@ -11,8 +11,9 @@
  * to a power of 2 to be suitable as a hash table.
  */
 
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
-			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+			     size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+			     const char *name, struct lock_class_key *key)
 {
 	spinlock_t *tlocks = NULL;
 	unsigned int i, size;
@@ -33,8 +34,10 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
 		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
 		if (!tlocks)
 			return -ENOMEM;
-		for (i = 0; i < size; i++)
+		for (i = 0; i < size; i++) {
 			spin_lock_init(&tlocks[i]);
+			lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+		}
 	}
 
 	*locks = tlocks;
@@ -42,7 +45,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
 
 	return 0;
 }
-EXPORT_SYMBOL(alloc_bucket_spinlocks);
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
 
 void free_bucket_spinlocks(spinlock_t *locks)
 {
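For context, a minimal usage sketch, not part of this commit: the caller and
variable names below are hypothetical, only alloc_bucket_spinlocks() and
free_bucket_spinlocks() come from the patch above. Because
alloc_bucket_spinlocks() is now a macro, each expansion defines its own
static lock_class_key and passes the stringified first argument (#locks) as
the lock name, so two different call sites land in two distinct lockdep
classes instead of sharing one.

#include <linux/spinlock.h>
#include <linux/gfp.h>

/* Hypothetical hash-table lock arrays owned by two separate users. */
static spinlock_t *ila_locks;
static spinlock_t *table_locks;
static unsigned int ila_mask, table_mask;

static int example_init(void)
{
	int err;

	/* Expands with its own static lock_class_key; lock name is "&ila_locks". */
	err = alloc_bucket_spinlocks(&ila_locks, &ila_mask, 1024, 0, GFP_KERNEL);
	if (err)
		return err;

	/*
	 * A second call site gets a different static key and the name
	 * "&table_locks", so lockdep no longer treats the two lock arrays
	 * as a single lock class.
	 */
	err = alloc_bucket_spinlocks(&table_locks, &table_mask, 1024, 0, GFP_KERNEL);
	if (err)
		free_bucket_spinlocks(ila_locks);

	return err;
}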