	wait: add wake_up_pollfree()
Several ->poll() implementations are special in that they use a waitqueue
whose lifetime is the current task, rather than the struct file as is
normally the case.  This is okay for blocking polls, since a blocking poll
occurs within one task; however, non-blocking polls require another
solution.  This solution is for the queue to be cleared before it is freed,
using 'wake_up_poll(wq, EPOLLHUP | POLLFREE);'.

However, that has a bug: wake_up_poll() calls __wake_up() with
nr_exclusive=1.  Therefore, if there are multiple "exclusive" waiters, and
the wakeup function for the first one returns a positive value, only that
one will be called.  That's *not* what's needed for POLLFREE; POLLFREE is
special in that it really needs to wake up everyone.

Considering the three non-blocking poll systems:

- io_uring poll doesn't handle POLLFREE at all, so it is broken anyway.

- aio poll is unaffected, since it doesn't support exclusive waits.
  However, that's fragile, as someone could add this feature later.

- epoll doesn't appear to be broken by this, since its wakeup function
  returns 0 when it sees POLLFREE.  But this is fragile.

Although there is a workaround (see epoll), it's better to define a
function which always sends POLLFREE to all waiters.  Add such a function.
Also make it verify that the queue really becomes empty after all waiters
have been woken up.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211209010455.42744-2-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
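For reference, the nr_exclusive behaviour described above comes down to a
single argument of __wake_up().  The existing wake_up_poll() macro (quoted
here from include/linux/wait.h as it stood around this change) passes
nr_exclusive=1, so the waitqueue walk can stop at the first exclusive
waiter whose wakeup function returns a positive value; the new helper
passes 0, which wakes every waiter:

/* Existing macro: nr_exclusive == 1, so the walk may stop at the first
 * exclusive waiter whose wakeup function returns a positive value. */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))

/* New helper (see below): nr_exclusive == 0, so every waiter is woken
 * with EPOLLHUP | POLLFREE. */
__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));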
parent 0fcfb00b28
commit 42288cb44c

2 changed files with 33 additions and 0 deletions
include/linux/wait.h

@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)				\
 	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	/*
+	 * For performance reasons, we don't always take the queue lock here.
+	 * Therefore, we might race with someone removing the last entry from
+	 * the queue, and proceed while they still hold the queue lock.
+	 * However, rcu_read_lock() is required to be held in such cases, so we
+	 * can safely proceed with an RCU-delayed free.
+	 */
+	if (waitqueue_active(wq_head))
+		__wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)						\
 ({										\
 	bool __cond = (condition);						\
kernel/sched/wait.c

@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+	/* POLLFREE must have cleared the queue. */
+	WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
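The kernel-doc added in wait.h also requires callers to RCU-delay the free
of the wait_queue_head.  A minimal caller-side sketch, with a made-up
struct my_task_ctx and my_task_ctx_free() used purely for illustration
(they are not part of this commit), could look like:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

/*
 * Hypothetical context whose lifetime is tied to a task rather than to
 * the struct file being polled, so its waitqueue can outlive pollers'
 * references only until the context is freed.
 */
struct my_task_ctx {
	wait_queue_head_t wait;		/* ->poll() queues waiters here */
	struct rcu_head rcu;
};

static void my_task_ctx_free(struct my_task_ctx *ctx)
{
	/* Wake *all* waiters with EPOLLHUP | POLLFREE so they unlink. */
	wake_up_pollfree(&ctx->wait);

	/*
	 * A lockless wakeup path may still touch ctx->wait under
	 * rcu_read_lock(), so delay the actual free by a grace period.
	 */
	kfree_rcu(ctx, rcu);
}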