mirror of https://github.com/torvalds/linux.git
	kernel/locking/lockdep.c: convert hash tables to hlists
Mike said:

: CONFIG_UBSAN_ALIGNMENT breaks x86-64 kernel with lockdep enabled, i.e.
: kernel with CONFIG_UBSAN_ALIGNMENT fails to load without even any error
: message.
:
: The problem is that ubsan callbacks use spinlocks and might be called
: before lockdep is initialized.  Particularly this line in the
: reserve_ebda_region function causes problem:
:
: lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
:
: If i put lockdep_init() before reserve_ebda_region call in
: x86_64_start_reservations kernel loads well.

Fix this ordering issue permanently: change lockdep so that it uses hlists
for the hash tables.  Unlike a list_head, an hlist_head is in its
initialized state when it is all-zeroes, so lockdep is ready for operation
immediately upon boot - lockdep_init() need not have run.

The patch will also save some memory.

lockdep_init() and lockdep_initialized can be done away with now - a 4.6
patch has been prepared to do this.

Reported-by: Mike Krinkin <krinkin.m.u@gmail.com>
Suggested-by: Mike Krinkin <krinkin.m.u@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

parent 6b75d14912
commit 4a389810bc

2 changed files with 21 additions and 25 deletions
include/linux/lockdep.h
@@ -66,7 +66,7 @@ struct lock_class {
 	/*
 	 * class-hash:
 	 */
-	struct list_head		hash_entry;
+	struct hlist_node		hash_entry;
 
 	/*
 	 * global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
 	u8				irq_context;
 	u8				depth;
 	u16				base;
-	struct list_head		entry;
+	struct hlist_node		entry;
 	u64				chain_key;
 };
 
kernel/locking/lockdep.c
@@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes);
 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
 
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 
 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@ static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return NULL;
 
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@ static inline struct lock_class *
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;
 
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
 	}
@@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * We use RCU's safe list-add method to make
 	 * parallel walking of the hash-list safe:
 	 */
-	list_add_tail_rcu(&class->hash_entry, hash_head);
+	hlist_add_head_rcu(&class->hash_entry, hash_head);
 	/*
 	 * Add it to the global list of classes:
 	 */
@@ -2017,7 +2017,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 				     u64 chain_key)
 {
 	struct lock_class *class = hlock_class(hlock);
-	struct list_head *hash_head = chainhashentry(chain_key);
+	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr;
 	int i, j;
@@ -2033,7 +2033,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2057,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	/*
 	 * We have to walk the chain again locked - to avoid duplicates:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	hlist_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 			graph_unlock();
 			goto cache_hit;
@@ -2091,7 +2091,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
-	list_add_tail_rcu(&chain->entry, hash_head);
+	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();
 
@@ -3875,7 +3875,7 @@ void lockdep_reset(void)
 	nr_process_chains = 0;
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 	raw_local_irq_restore(flags);
 }
 
@@ -3894,7 +3894,7 @@ static void zap_class(struct lock_class *class)
 	/*
 	 * Unhash the class and remove it from the all_lock_classes list:
 	 */
-	list_del_rcu(&class->hash_entry);
+	hlist_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);
 
 	RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3917,7 @@ static inline int within(const void *addr, void *start, unsigned long size)
 void lockdep_free_key_range(void *start, unsigned long size)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i;
 	int locked;
@@ -3930,9 +3930,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	 */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3962,7 +3960,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i, j;
 	int locked;
@@ -3987,9 +3985,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	locked = graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;
 
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4023,10 @@ void lockdep_init(void)
 		return;
 
 	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_LIST_HEAD(classhash_table + i);
+		INIT_HLIST_HEAD(classhash_table + i);
 
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 
 	lockdep_initialized = 1;
 }