mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
The workqueue code added manual lock acquisition annotations to catch deadlocks. After lockdep cross-release was introduced, some of those became redundant, since wait_for_completion() already does the acquisition and tracking. Remove the duplicate annotations. Signed-off-by: Byungchul Park <byungchul.park@lge.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: amir73il@gmail.com Cc: axboe@kernel.dk Cc: darrick.wong@oracle.com Cc: david@fromorbit.com Cc: hch@infradead.org Cc: idryomov@gmail.com Cc: johan@kernel.org Cc: johannes.berg@intel.com Cc: kernel-team@lge.com Cc: linux-block@vger.kernel.org Cc: linux-fsdevel@vger.kernel.org Cc: linux-mm@kvack.org Cc: linux-xfs@vger.kernel.org Cc: oleg@redhat.com Cc: tj@kernel.org Link: http://lkml.kernel.org/r/1508921765-15396-9-git-send-email-byungchul.park@lge.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
		
							parent
							
								
									a7967bc315
								
							
						
					
					
						commit
						fd1a5b04df
					
				
					 2 changed files with 5 additions and 18 deletions
				
			
		| 
						 | 
				
			
			@ -218,7 +218,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 | 
			
		|||
									\
 | 
			
		||||
		__init_work((_work), _onstack);				\
 | 
			
		||||
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 | 
			
		||||
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 | 
			
		||||
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
 | 
			
		||||
		INIT_LIST_HEAD(&(_work)->entry);			\
 | 
			
		||||
		(_work)->func = (_func);				\
 | 
			
		||||
	} while (0)
 | 
			
		||||
| 
						 | 
				
			
			@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 | 
			
		|||
	static struct lock_class_key __key;				\
 | 
			
		||||
	const char *__lock_name;					\
 | 
			
		||||
									\
 | 
			
		||||
	__lock_name = #fmt#args;					\
 | 
			
		||||
	__lock_name = "(wq_completion)"#fmt#args;			\
 | 
			
		||||
									\
 | 
			
		||||
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
 | 
			
		||||
			      &__key, __lock_name, ##args);		\
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2497,15 +2497,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 | 
			
		|||
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 | 
			
		||||
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * Explicitly init the crosslock for wq_barrier::done, make its lock
 | 
			
		||||
	 * key a subkey of the corresponding work. As a result we won't
 | 
			
		||||
	 * build a dependency between wq_barrier::done and unrelated work.
 | 
			
		||||
	 */
 | 
			
		||||
	lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
 | 
			
		||||
				   "(complete)wq_barr::done",
 | 
			
		||||
				   target->lockdep_map.key, 1);
 | 
			
		||||
	__init_completion(&barr->done);
 | 
			
		||||
	init_completion_map(&barr->done, &target->lockdep_map);
 | 
			
		||||
 | 
			
		||||
	barr->task = current;
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			@ -2611,16 +2604,13 @@ void flush_workqueue(struct workqueue_struct *wq)
 | 
			
		|||
	struct wq_flusher this_flusher = {
 | 
			
		||||
		.list = LIST_HEAD_INIT(this_flusher.list),
 | 
			
		||||
		.flush_color = -1,
 | 
			
		||||
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
 | 
			
		||||
		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
 | 
			
		||||
	};
 | 
			
		||||
	int next_color;
 | 
			
		||||
 | 
			
		||||
	if (WARN_ON(!wq_online))
 | 
			
		||||
		return;
 | 
			
		||||
 | 
			
		||||
	lock_map_acquire(&wq->lockdep_map);
 | 
			
		||||
	lock_map_release(&wq->lockdep_map);
 | 
			
		||||
 | 
			
		||||
	mutex_lock(&wq->mutex);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			@ -2883,9 +2873,6 @@ bool flush_work(struct work_struct *work)
 | 
			
		|||
	if (WARN_ON(!wq_online))
 | 
			
		||||
		return false;
 | 
			
		||||
 | 
			
		||||
	lock_map_acquire(&work->lockdep_map);
 | 
			
		||||
	lock_map_release(&work->lockdep_map);
 | 
			
		||||
 | 
			
		||||
	if (start_flush_work(work, &barr)) {
 | 
			
		||||
		wait_for_completion(&barr.done);
 | 
			
		||||
		destroy_work_on_stack(&barr.work);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue