forked from mirrors/linux

sched/wait: Rename wait_queue_t => wait_queue_entry_t

Rename: wait_queue_t => wait_queue_entry_t

'wait_queue_t' was always a slight misnomer: its name implies that it's
a "queue", but in reality it's a queue *entry*. The 'real' queue is the
wait queue head, which had to carry the name.

Start sorting this out by renaming it to 'wait_queue_entry_t'.

This also allows the real structure name 'struct __wait_queue' to lose
its double underscore and become 'struct wait_queue_entry', which is
the more canonical nomenclature for such data types.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

parent 9705596d08
commit ac6424b981

94 changed files with 216 additions and 213 deletions
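For callers, only the type name changes; the declare/init/add pattern is untouched, as the conversions below show. A minimal sketch of the post-rename usage (the wait-queue head and condition flag here are hypothetical, for illustration only, not part of this commit):

```c
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq_head);	/* hypothetical head */
static int example_cond;				/* hypothetical condition */

/* Sleep until example_cond becomes true, using the renamed entry type. */
static void example_wait(void)
{
	wait_queue_entry_t wait;	/* was: wait_queue_t wait; */

	init_waitqueue_entry(&wait, current);
	add_wait_queue(&example_wq_head, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (example_cond)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&example_wq_head, &wait);
}
```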
				
			
@@ -819,7 +819,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
   certain condition is true.  They must be used carefully to ensure
   there is no race condition.  You declare a
   <type>wait_queue_head_t</type>, and then processes which want to
-  wait for that condition declare a <type>wait_queue_t</type>
+  wait for that condition declare a <type>wait_queue_entry_t</type>
   referring to themselves, and place that in the queue.
  </para>
@@ -316,7 +316,7 @@ For version 5, the format of the message is:
         struct autofs_v5_packet {
                 int proto_version;                /* Protocol version */
                 int type;                        /* Type of packet */
-                autofs_wqt_t wait_queue_token;
+                autofs_wqt_t wait_queue_entry_token;
                 __u32 dev;
                 __u64 ino;
                 __u32 uid;
@@ -341,12 +341,12 @@ The pipe will be set to "packet mode" (equivalent to passing
 `O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at
 most one packet, and any unread portion of a packet will be discarded.
 
-The `wait_queue_token` is a unique number which can identify a
+The `wait_queue_entry_token` is a unique number which can identify a
 particular request to be acknowledged.  When a message is sent over
 the pipe the affected dentry is marked as either "active" or
 "expiring" and other accesses to it block until the message is
 acknowledged using one of the ioctls below and the relevant
-`wait_queue_token`.
+`wait_queue_entry_token`.
 
 Communicating with autofs: root directory ioctls
 ------------------------------------------------
@@ -358,7 +358,7 @@ capability, or must be the automount daemon.
 The available ioctl commands are:
 
 - **AUTOFS_IOC_READY**: a notification has been handled.  The argument
-    to the ioctl command is the "wait_queue_token" number
+    to the ioctl command is the "wait_queue_entry_token" number
     corresponding to the notification being acknowledged.
 - **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with
     the error code `ENOENT`.
@@ -382,14 +382,14 @@ The available ioctl commands are:
         struct autofs_packet_expire_multi {
                 int proto_version;              /* Protocol version */
                 int type;                       /* Type of packet */
-                autofs_wqt_t wait_queue_token;
+                autofs_wqt_t wait_queue_entry_token;
                 int len;
                 char name[NAME_MAX+1];
         };
 
      is required.  This is filled in with the name of something
      that can be unmounted or removed.  If nothing can be expired,
-     `errno` is set to `EAGAIN`.  Even though a `wait_queue_token`
+     `errno` is set to `EAGAIN`.  Even though a `wait_queue_entry_token`
      is present in the structure, no "wait queue" is established
      and no acknowledgment is needed.
 - **AUTOFS_IOC_EXPIRE_MULTI**:  This is similar to
@@ -926,7 +926,7 @@ static bool reorder_tags_to_front(struct list_head *list)
 	return first != NULL;
 }
 
-static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 				void *key)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 }
 
 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
-			     wait_queue_t *wait, unsigned long rw)
+			     wait_queue_entry_t *wait, unsigned long rw)
 {
 	/*
 	 * inc it here even if disabled, since we'll dec it at completion.
@@ -99,7 +99,7 @@ struct kyber_hctx_data {
 	struct list_head rqs[KYBER_NUM_DOMAINS];
 	unsigned int cur_domain;
 	unsigned int batching;
-	wait_queue_t domain_wait[KYBER_NUM_DOMAINS];
+	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
@@ -507,7 +507,7 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
 	}
 }
 
-static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 			     void *key)
 {
 	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
@@ -523,7 +523,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
 	unsigned int sched_domain = khd->cur_domain;
 	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-	wait_queue_t *wait = &khd->domain_wait[sched_domain];
+	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
 	struct sbq_wait_state *ws;
 	int nr;
@@ -734,7 +734,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
 {									\
 	struct blk_mq_hw_ctx *hctx = data;				\
 	struct kyber_hctx_data *khd = hctx->sched_data;			\
-	wait_queue_t *wait = &khd->domain_wait[domain];			\
+	wait_queue_entry_t *wait = &khd->domain_wait[domain];		\
 									\
 	seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list));	\
 	return 0;							\
@@ -602,7 +602,7 @@ static int btmrvl_service_main_thread(void *data)
 	struct btmrvl_thread *thread = data;
 	struct btmrvl_private *priv = thread->priv;
 	struct btmrvl_adapter *adapter = priv->adapter;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct sk_buff *skb;
 	ulong flags;
@@ -821,7 +821,7 @@ static ssize_t ipmi_read(struct file *file,
 			 loff_t      *ppos)
 {
 	int          rv = 0;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (count <= 0)
 		return 0;
@@ -123,7 +123,7 @@ struct drm_i915_gem_request {
 	 * It is used by the driver to then queue the request for execution.
 	 */
 	struct i915_sw_fence submit;
-	wait_queue_t submitq;
+	wait_queue_entry_t submitq;
 	wait_queue_head_t execute;
 
 	/* A list of everyone we wait upon, and everyone who waits upon us.
@@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 					struct list_head *continuation)
 {
 	wait_queue_head_t *x = &fence->wait;
-	wait_queue_t *pos, *next;
+	wait_queue_entry_t *pos, *next;
 	unsigned long flags;
 
 	debug_fence_deactivate(fence);
@@ -254,7 +254,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
 	__i915_sw_fence_commit(fence);
 }
 
-static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
 {
 	list_del(&wq->task_list);
 	__i915_sw_fence_complete(wq->private, key);
@@ -267,7 +267,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
 static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 				    const struct i915_sw_fence * const signaler)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 
 	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
 		return false;
@@ -288,7 +288,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 
 static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 
 	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
 		return;
@@ -320,7 +320,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 
 static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 					  struct i915_sw_fence *signaler,
-					  wait_queue_t *wq, gfp_t gfp)
+					  wait_queue_entry_t *wq, gfp_t gfp)
 {
 	unsigned long flags;
 	int pending;
@@ -359,7 +359,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 
 	spin_lock_irqsave(&signaler->wait.lock, flags);
 	if (likely(!i915_sw_fence_done(signaler))) {
-		__add_wait_queue_tail(&signaler->wait, wq);
+		__add_wait_queue_entry_tail(&signaler->wait, wq);
 		pending = 1;
 	} else {
 		i915_sw_fence_wake(wq, 0, 0, NULL);
@@ -372,7 +372,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *signaler,
-				 wait_queue_t *wq)
+				 wait_queue_entry_t *wq)
 {
 	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
 }
@@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
 
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *after,
-				 wait_queue_t *wq);
+				 wait_queue_entry_t *wq);
 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 				     struct i915_sw_fence *after,
 				     gfp_t gfp);
@@ -375,7 +375,7 @@ struct radeon_fence {
 	unsigned		ring;
 	bool			is_vm_update;
 
-	wait_queue_t		fence_wake;
+	wait_queue_entry_t		fence_wake;
 };
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
 {
 	struct radeon_fence *fence;
 	u64 seq;
@@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
 	struct vga_device *vgadev, *conflict;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int rc = 0;
 
 	vga_check_first_use();
@@ -1939,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
 bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
 {
 	struct i40iw_device *iwdev;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	iwdev = dev->back_dev;
@@ -207,7 +207,7 @@ void bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	/* for waiting on btree reserve in btree_split() */
-	wait_queue_t		wait;
+	wait_queue_entry_t		wait;
 
 	/* Btree level at which we start taking write locks */
 	short			lock;
@@ -144,7 +144,7 @@ static inline int
 sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 {
 	int errno = 0;
-	wait_queue_t we;
+	wait_queue_entry_t we;
 
 	init_waitqueue_entry(&we, current);
 	add_wait_queue(wait_queue, &we);
@@ -171,7 +171,7 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue,
 		   int *condition,
 		   int timeout)
 {
-	wait_queue_t we;
+	wait_queue_entry_t we;
 
 	init_waitqueue_entry(&we, current);
 	add_wait_queue(wait_queue, &we);
@@ -3066,7 +3066,7 @@ static int airo_thread(void *data) {
 		if (ai->jobs) {
 			locked = down_interruptible(&ai->sem);
 		} else {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 
 			init_waitqueue_entry(&wait, current);
 			add_wait_queue(&ai->thr_wait, &wait);
@@ -2544,7 +2544,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
 			ret = -EINVAL;
 		}
 		if (local->iw_mode == IW_MODE_MASTER) {
-			wait_queue_t __wait;
+			wait_queue_entry_t __wait;
 			init_waitqueue_entry(&__wait, current);
 			add_wait_queue(&local->hostscan_wq, &__wait);
 			set_current_state(TASK_INTERRUPTIBLE);
@@ -453,7 +453,7 @@ static int lbs_thread(void *data)
 {
 	struct net_device *dev = data;
 	struct lbs_private *priv = dev->ml_priv;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	lbs_deb_enter(LBS_DEB_THREAD);
@@ -48,7 +48,7 @@
 #include <linux/wait.h>
 typedef wait_queue_head_t adpt_wait_queue_head_t;
 #define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_t adpt_wait_queue_t;
+typedef wait_queue_entry_t adpt_wait_queue_entry_t;
 
 /*
  * message structures
@@ -301,13 +301,13 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
 static uint32_t ips_statupd_morpheus(ips_ha_t *);
 static ips_scb_t *ips_getscb(ips_ha_t *);
 static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
-static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
 static void ips_putq_copp_tail(ips_copp_queue_t *,
 				      ips_copp_wait_item_t *);
 static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
 static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
 					  struct scsi_cmnd *);
 static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
 						     ips_copp_wait_item_t *);
@@ -2871,7 +2871,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
 /* ASSUMED to be called from within the HA lock                             */
 /*                                                                          */
 /****************************************************************************/
-static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
 {
 	METHOD_TRACE("ips_putq_wait_tail", 1);
@@ -2902,7 +2902,7 @@ static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
 /* ASSUMED to be called from within the HA lock                             */
 /*                                                                          */
 /****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
 {
 	struct scsi_cmnd *item;
@@ -2936,7 +2936,7 @@ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
 /* ASSUMED to be called from within the HA lock                             */
 /*                                                                          */
 /****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
 					  struct scsi_cmnd *item)
 {
 	struct scsi_cmnd *p;
@@ -989,7 +989,7 @@ typedef struct ips_wait_queue {
 	struct scsi_cmnd *head;
 	struct scsi_cmnd *tail;
 	int count;
-} ips_wait_queue_t;
+} ips_wait_queue_entry_t;
 
 typedef struct ips_copp_wait_item {
 	struct scsi_cmnd *scsi_cmd;
@@ -1035,7 +1035,7 @@ typedef struct ips_ha {
    ips_stat_t         sp;                 /* Status packer pointer      */
    struct ips_scb    *scbs;               /* Array of all CCBS          */
    struct ips_scb    *scb_freelist;       /* SCB free list              */
-   ips_wait_queue_t   scb_waitlist;       /* Pending SCB list           */
+   ips_wait_queue_entry_t   scb_waitlist;       /* Pending SCB list           */
    ips_copp_queue_t   copp_waitlist;      /* Pending PT list            */
    ips_scb_queue_t    scb_activelist;     /* Active SCB list            */
    IPS_IO_CMD        *dummy;              /* dummy command              */
@@ -3267,7 +3267,7 @@ int
 kiblnd_connd(void *arg)
 {
 	spinlock_t *lock = &kiblnd_data.kib_connd_lock;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned long flags;
 	struct kib_conn *conn;
 	int timeout;
@@ -3521,7 +3521,7 @@ kiblnd_scheduler(void *arg)
 	long id = (long)arg;
 	struct kib_sched_info *sched;
 	struct kib_conn *conn;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned long flags;
 	struct ib_wc wc;
 	int did_something;
@@ -3656,7 +3656,7 @@ kiblnd_failover_thread(void *arg)
 {
 	rwlock_t *glock = &kiblnd_data.kib_global_lock;
 	struct kib_dev *dev;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned long flags;
 	int rc;
@@ -2166,7 +2166,7 @@ ksocknal_connd(void *arg)
 {
 	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
 	struct ksock_connreq *cr;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int nloops = 0;
 	int cons_retry = 0;
@@ -2554,7 +2554,7 @@ ksocknal_check_peer_timeouts(int idx)
 int
 ksocknal_reaper(void *arg)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct ksock_conn *conn;
 	struct ksock_sched *sched;
 	struct list_head enomem_conns;
@@ -361,7 +361,7 @@ static int libcfs_debug_dumplog_thread(void *arg)
 
 void libcfs_debug_dumplog(void)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct task_struct *dumper;
 
 	/* we're being careful to ensure that the kernel thread is
@@ -990,7 +990,7 @@ static int tracefiled(void *arg)
 	complete(&tctl->tctl_start);
 
 	while (1) {
-		wait_queue_t __wait;
+		wait_queue_entry_t __wait;
 
 		pc.pc_want_daemon_pages = 0;
 		collect_pages(&pc);
@@ -312,7 +312,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
 {
 	int tms = *timeout_ms;
 	int wait;
-	wait_queue_t wl;
+	wait_queue_entry_t wl;
 	unsigned long now;
 
 	if (!tms)
@@ -516,7 +516,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
 int
 lnet_sock_accept(struct socket **newsockp, struct socket *sock)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct socket *newsock;
 	int rc;
@@ -192,7 +192,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 }
 
 static int seq_fid_alloc_prep(struct lu_client_seq *seq,
-			      wait_queue_t *link)
+			      wait_queue_entry_t *link)
 {
 	if (seq->lcs_update) {
 		add_wait_queue(&seq->lcs_waitq, link);
@@ -223,7 +223,7 @@ static void seq_fid_alloc_fini(struct lu_client_seq *seq)
 int seq_client_alloc_fid(const struct lu_env *env,
 			 struct lu_client_seq *seq, struct lu_fid *fid)
 {
-	wait_queue_t link;
+	wait_queue_entry_t link;
 	int rc;
 
 	LASSERT(seq);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
  */
 void seq_client_flush(struct lu_client_seq *seq)
 {
-	wait_queue_t link;
+	wait_queue_entry_t link;
 
 	LASSERT(seq);
 	init_waitqueue_entry(&link, current);
@@ -201,7 +201,7 @@ struct l_wait_info {
 			   sigmask(SIGALRM))
 
 /**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively
  * waiting threads, which is not always desirable because all threads will
 * be waken up again and again, even user only needs a few of them to be
 * active most time. This is not good for performance because cache can
@@ -228,7 +228,7 @@ struct l_wait_info {
  */
 #define __l_wait_event(wq, condition, info, ret, l_add_wait)		   \
 do {									   \
-	wait_queue_t __wait;						 \
+	wait_queue_entry_t __wait;					 \
 	long __timeout = info->lwi_timeout;			  \
 	sigset_t   __blocked;					      \
 	int   __allow_intr = info->lwi_allow_intr;			     \
@@ -207,7 +207,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 {
 	struct lu_object_header *header = obj->co_lu.lo_header;
-	wait_queue_t	   waiter;
+	wait_queue_entry_t	   waiter;
 
 	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
 		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
@@ -370,7 +370,7 @@ struct lov_thread_info {
 	struct ost_lvb	  lti_lvb;
 	struct cl_2queue	lti_cl2q;
 	struct cl_page_list     lti_plist;
-	wait_queue_t	  lti_waiter;
+	wait_queue_entry_t	  lti_waiter;
 	struct cl_attr          lti_attr;
 };
@@ -371,7 +371,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
 	struct lov_layout_raid0 *r0;
 	struct lu_site	  *site;
 	struct lu_site_bkt_data *bkt;
-	wait_queue_t	  *waiter;
+	wait_queue_entry_t	  *waiter;
 
 	r0  = &lov->u.raid0;
 	LASSERT(r0->lo_sub[idx] == los);
@@ -556,7 +556,7 @@ EXPORT_SYMBOL(lu_object_print);
 static struct lu_object *htable_lookup(struct lu_site *s,
 				       struct cfs_hash_bd *bd,
 				       const struct lu_fid *f,
-				       wait_queue_t *waiter,
+				       wait_queue_entry_t *waiter,
 				       __u64 *version)
 {
 	struct lu_site_bkt_data *bkt;
@@ -670,7 +670,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
 					    struct lu_device *dev,
 					    const struct lu_fid *f,
 					    const struct lu_object_conf *conf,
-					    wait_queue_t *waiter)
+					    wait_queue_entry_t *waiter)
 {
 	struct lu_object      *o;
 	struct lu_object      *shadow;
@@ -750,7 +750,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
 {
 	struct lu_site_bkt_data *bkt;
 	struct lu_object	*obj;
-	wait_queue_t	   wait;
+	wait_queue_entry_t	   wait;
 
 	while (1) {
 		obj = lu_object_find_try(env, dev, f, conf, &wait);
@@ -184,7 +184,7 @@ static void hdlcdev_exit(struct slgt_info *info);
 struct cond_wait {
 	struct cond_wait *next;
 	wait_queue_head_t q;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned int data;
 };
 static void init_cond_wait(struct cond_wait *w, unsigned int data);
@@ -43,7 +43,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
 	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
 }
 
-static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
 	unsigned long flags = (unsigned long)key;
@@ -165,7 +165,7 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 	add_wait_queue(wqh, &poll->wait);
 }
 
-static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 			     void *key)
 {
 	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
@@ -31,7 +31,7 @@ struct vhost_work {
 struct vhost_poll {
 	poll_table                table;
 	wait_queue_head_t        *wqh;
-	wait_queue_t              wait;
+	wait_queue_entry_t              wait;
 	struct vhost_work	  work;
 	unsigned long		  mask;
 	struct vhost_dev	 *dev;
@@ -83,7 +83,7 @@ struct autofs_info {
 struct autofs_wait_queue {
 	wait_queue_head_t queue;
 	struct autofs_wait_queue *next;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	/* We use the following to see what we are waiting for */
 	struct qstr name;
 	u32 dev;
@@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 	size_t pktsz;
 
 	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
-		 (unsigned long) wq->wait_queue_token,
+		 (unsigned long) wq->wait_queue_entry_token,
 		 wq->name.len, wq->name.name, type);
 
 	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */
@@ -120,7 +120,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 		pktsz = sizeof(*mp);
 
-		mp->wait_queue_token = wq->wait_queue_token;
+		mp->wait_queue_entry_token = wq->wait_queue_entry_token;
 		mp->len = wq->name.len;
 		memcpy(mp->name, wq->name.name, wq->name.len);
 		mp->name[wq->name.len] = '\0';
@@ -133,7 +133,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 		pktsz = sizeof(*ep);
 
-		ep->wait_queue_token = wq->wait_queue_token;
+		ep->wait_queue_entry_token = wq->wait_queue_entry_token;
 		ep->len = wq->name.len;
 		memcpy(ep->name, wq->name.name, wq->name.len);
 		ep->name[wq->name.len] = '\0';
@@ -153,7 +153,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 		pktsz = sizeof(*packet);
 
-		packet->wait_queue_token = wq->wait_queue_token;
+		packet->wait_queue_entry_token = wq->wait_queue_entry_token;
 		packet->len = wq->name.len;
 		memcpy(packet->name, wq->name.name, wq->name.len);
 		packet->name[wq->name.len] = '\0';
@@ -428,7 +428,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 			return -ENOMEM;
 		}
 
-		wq->wait_queue_token = autofs4_next_wait_queue;
+		wq->wait_queue_entry_token = autofs4_next_wait_queue;
 		if (++autofs4_next_wait_queue == 0)
 			autofs4_next_wait_queue = 1;
 		wq->next = sbi->queues;
@@ -461,7 +461,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 		}
 
 		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-			 (unsigned long) wq->wait_queue_token, wq->name.len,
+			 (unsigned long) wq->wait_queue_entry_token, wq->name.len,
 			 wq->name.name, notify);
 
 		/*
@@ -471,7 +471,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 	} else {
 		wq->wait_ctr++;
 		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-			 (unsigned long) wq->wait_queue_token, wq->name.len,
+			 (unsigned long) wq->wait_queue_entry_token, wq->name.len,
 			 wq->name.name, notify);
 		mutex_unlock(&sbi->wq_mutex);
 		kfree(qstr.name);
@@ -550,13 +550,13 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 }
 
 
-int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_entry_token, int status)
 {
 	struct autofs_wait_queue *wq, **wql;
 
 	mutex_lock(&sbi->wq_mutex);
 	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
-		if (wq->wait_queue_token == wait_queue_token)
+		if (wq->wait_queue_entry_token == wait_queue_entry_token)
 			break;
 	}
@@ -97,7 +97,7 @@ struct cachefiles_cache {
  * backing file read tracking
  */
 struct cachefiles_one_read {
-	wait_queue_t			monitor;	/* link into monitored waitqueue */
+	wait_queue_entry_t			monitor;	/* link into monitored waitqueue */
 	struct page			*back_page;	/* backing file page we're waiting for */
 	struct page			*netfs_page;	/* netfs page we're going to fill */
 	struct fscache_retrieval	*op;		/* retrieval op covering this */
@@ -204,7 +204,7 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
 		wait_queue_head_t *wq;
 
 		signed long timeout = 60 * HZ;
-		wait_queue_t wait;
+		wait_queue_entry_t wait;
 		bool requeue;
 
 		/* if the object we're waiting for is queued for processing,
@@ -21,7 +21,7 @@
  * - we use this to detect read completion of backing pages
  * - the caller holds the waitqueue lock
  */
-static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
+static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 				  int sync, void *_key)
 {
 	struct cachefiles_one_read *monitor =
fs/dax.c
@@ -84,7 +84,7 @@ struct exceptional_entry_key {
 };
 
 struct wait_exceptional_entry_queue {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct exceptional_entry_key key;
 };
@@ -108,7 +108,7 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
 	return wait_table + hash;
 }
 
-static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
 				       int sync, void *keyp)
 {
 	struct exceptional_entry_key *key = keyp;
@@ -191,7 +191,7 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
  * This is used to atomically remove a wait queue entry from the eventfd wait
  * queue head, and read/reset the counter value.
  */
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt)
 {
 	unsigned long flags;
@@ -244,7 +244,7 @@ struct eppoll_entry {
 	 * Wait queue item that will be linked to the target file wait
 	 * queue head.
 	 */
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	/* The wait queue head that linked the "wait" wait queue item */
 	wait_queue_head_t *whead;
@@ -347,13 +347,13 @@ static inline int ep_is_linked(struct list_head *p)
 	return !list_empty(p);
 }
 
-static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
+static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
 {
 	return container_of(p, struct eppoll_entry, wait);
 }
 
 /* Get the "struct epitem" from a wait queue pointer */
-static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
+static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
 {
 	return container_of(p, struct eppoll_entry, wait)->base;
 }
@@ -1078,7 +1078,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
  * mechanism. It is called by the stored file descriptors when they
  * have events to report.
  */
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	int pwake = 0;
 	unsigned long flags;
@@ -1699,7 +1699,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	int res = 0, eavail, timed_out = 0;
 	unsigned long flags;
 	u64 slack = 0;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
 	if (timeout > 0) {
@@ -34,7 +34,7 @@ void pin_insert(struct fs_pin *pin, struct vfsmount *m)
 
 void pin_kill(struct fs_pin *p)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (!p) {
 		rcu_read_unlock();
@@ -6372,7 +6372,7 @@ struct nfs4_lock_waiter {
 };
 
 static int
-nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key)
+nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
 {
 	int ret;
 	struct cb_notify_lock_args *cbnl = key;
@@ -6415,7 +6415,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 					   .inode = state->inode,
 					   .owner = &owner,
 					   .notified = false };
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	/* Don't bother with waitqueue if we don't expect a callback */
 	if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
@@ -2161,7 +2161,7 @@ void nilfs_flush_segment(struct super_block *sb, ino_t ino)
 }
 
 struct nilfs_segctor_wait_request {
-	wait_queue_t	wq;
+	wait_queue_entry_t	wq;
 	__u32		seq;
 	int		err;
 	atomic_t	done;
@@ -47,7 +47,7 @@ static void run_down(struct slot_map *m)
 	if (m->c != -1) {
 		for (;;) {
 			if (likely(list_empty(&wait.task_list)))
-				__add_wait_queue_tail(&m->q, &wait);
+				__add_wait_queue_entry_tail(&m->q, &wait);
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
 			if (m->c == -1)
@@ -85,7 +85,7 @@ static int wait_for_free(struct slot_map *m)
 	do {
 		long n = left, t;
 		if (likely(list_empty(&wait.task_list)))
-			__add_wait_queue_tail_exclusive(&m->q, &wait);
+			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (m->c > 0)
@@ -2956,7 +2956,7 @@ void reiserfs_wait_on_write_block(struct super_block *s)
 
 static void queue_log_writer(struct super_block *s)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct reiserfs_journal *journal = SB_JOURNAL(s);
 	set_bit(J_WRITERS_QUEUED, &journal->j_state);
@@ -180,7 +180,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
 	return table->entry++;
 }
 
-static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct poll_wqueues *pwq = wait->private;
 	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
@@ -206,7 +206,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	return default_wake_function(&dummy_wait, mode, sync, key);
 }
 
-static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct poll_table_entry *entry;
@@ -43,7 +43,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
 	if (likely(!waitqueue_active(wqh)))
 		return;
 
-	/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
+	/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
 	wake_up_poll(wqh, POLLHUP | POLLFREE);
 }
@@ -81,7 +81,7 @@ struct userfaultfd_unmap_ctx {
 
 struct userfaultfd_wait_queue {
 	struct uffd_msg msg;
-	wait_queue_t wq;
+	wait_queue_entry_t wq;
 	struct userfaultfd_ctx *ctx;
 	bool waken;
 };
@@ -91,7 +91,7 @@ struct userfaultfd_wake_range {
 	unsigned long len;
 };
 
-static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
+static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
 				     int wake_flags, void *key)
 {
 	struct userfaultfd_wake_range *range = key;
@@ -860,7 +860,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 static inline struct userfaultfd_wait_queue *find_userfault_in(
 		wait_queue_head_t *wqh)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 	struct userfaultfd_wait_queue *uwq;
 
 	VM_BUG_ON(!spin_is_locked(&wqh->lock));
@@ -1747,7 +1747,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 {
 	struct userfaultfd_ctx *ctx = f->private_data;
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 	struct userfaultfd_wait_queue *uwq;
 	unsigned long pending = 0, total = 0;
@@ -33,7 +33,7 @@ struct blk_mq_hw_ctx {
 	struct blk_mq_ctx	**ctxs;
 	unsigned int		nr_ctx;
 
-	wait_queue_t		dispatch_wait;
+	wait_queue_entry_t		dispatch_wait;
 	atomic_t		wait_index;
 
 	struct blk_mq_tags	*tags;
@@ -37,7 +37,7 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
 #else /* CONFIG_EVENTFD */
@@ -73,7 +73,7 @@ static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
 }
 
 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
-						wait_queue_t *wait, __u64 *cnt)
+						wait_queue_entry_t *wait, __u64 *cnt)
 {
 	return -ENOSYS;
 }
@@ -46,7 +46,7 @@ struct kvm_kernel_irqfd_resampler {
 struct kvm_kernel_irqfd {
 	/* Used for MSI fast-path */
 	struct kvm *kvm;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	/* Update side is protected by irqfds.lock */
 	struct kvm_kernel_irq_routing_entry irq_entry;
 	seqcount_t irq_entry_sc;
@@ -524,7 +524,7 @@ void page_endio(struct page *page, bool is_write, int err);
 /*
  * Add an arbitrary waiter to a page's wait queue
  */
-extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
+extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
 
 /*
  * Fault everything in given userspace address range in.
@@ -75,7 +75,7 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 struct poll_table_entry {
 	struct file *filp;
 	unsigned long key;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	wait_queue_head_t *wait_address;
 };
@@ -183,7 +183,7 @@ struct virqfd {
 	void			(*thread)(void *, void *);
 	void			*data;
 	struct work_struct	inject;
-	wait_queue_t		wait;
+	wait_queue_entry_t		wait;
 	poll_table		pt;
 	struct work_struct	shutdown;
 	struct virqfd		**pvirqfd;
@@ -10,15 +10,18 @@
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
 
-typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
+typedef struct wait_queue_entry wait_queue_entry_t;
+typedef int (*wait_queue_func_t)(wait_queue_entry_t *wait, unsigned mode, int flags, void *key);
+int default_wake_function(wait_queue_entry_t *wait, unsigned mode, int flags, void *key);
 
-/* __wait_queue::flags */
+/* wait_queue_entry::flags */
 #define WQ_FLAG_EXCLUSIVE	0x01
 #define WQ_FLAG_WOKEN		0x02
 
-struct __wait_queue {
+/*
+ * A single wait-queue entry structure:
+ */
+struct wait_queue_entry {
 	unsigned int		flags;
 	void			*private;
 	wait_queue_func_t	func;
@@ -34,7 +37,7 @@ struct wait_bit_key {
 
 struct wait_bit_queue {
 	struct wait_bit_key	key;
-	wait_queue_t		wait;
+	wait_queue_entry_t	wait;
 };
 
 struct __wait_queue_head {
@@ -55,7 +58,7 @@ struct task_struct;
 	.task_list	= { NULL, NULL } }
 
 #define DECLARE_WAITQUEUE(name, tsk)					\
-	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
+	wait_queue_entry_t name = __WAITQUEUE_INITIALIZER(name, tsk)
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
 	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
			@ -88,7 +91,7 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
 | 
			
		|||
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 | 
			
		||||
static inline void init_waitqueue_entry(wait_queue_entry_t *q, struct task_struct *p)
 | 
			
		||||
{
 | 
			
		||||
	q->flags	= 0;
 | 
			
		||||
	q->private	= p;
@@ -96,7 +99,7 @@ static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 }
 
 static inline void
-init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
+init_waitqueue_func_entry(wait_queue_entry_t *q, wait_queue_func_t func)
 {
 	q->flags	= 0;
 	q->private	= NULL;
@@ -159,11 +162,11 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq)
 	return waitqueue_active(wq);
 }
 
-extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
-extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
-extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+extern void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait);
+extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait);
+extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait);
 
-static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
+static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
 {
 	list_add(&new->task_list, &head->task_list);
 }
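
For orientation, the declarations above support the classic open-coded wait loop; a sketch under assumed names (`my_wq` head and `condition` flag are hypothetical, not from this commit):

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&my_wq, &wait);
	while (!condition) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;			/* interrupted by a signal */
		schedule();			/* sleep until woken */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_wq, &wait);
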
@@ -172,27 +175,27 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  * Used for wake-one threads:
  */
 static inline void
-__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait)
 {
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue(q, wait);
 }
 
-static inline void __add_wait_queue_tail(wait_queue_head_t *head,
-					 wait_queue_t *new)
+static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head,
+					 wait_queue_entry_t *new)
 {
 	list_add_tail(&new->task_list, &head->task_list);
 }
 
 static inline void
-__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+__add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait)
 {
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_tail(q, wait);
+	__add_wait_queue_entry_tail(q, wait);
 }
 
 static inline void
-__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
 {
 	list_del(&old->task_list);
 }
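
Exclusive entries go to the tail so that a plain wake_up() stops after waking one of them, avoiding a thundering herd. A sketch of the locked usage these double-underscore helpers expect (`q`, `wq_entry`, and `flags` are hypothetical locals; the caller must hold the head's lock, as add_wait_queue_exclusive() itself does):

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail_exclusive(q, &wq_entry);	/* wake-one waiter */
	spin_unlock_irqrestore(&q->lock, flags);
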
@@ -249,7 +252,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 	(!__builtin_constant_p(state) ||				\
 		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\
 
-extern void init_wait_entry(wait_queue_t *__wait, int flags);
+extern void init_wait_entry(wait_queue_entry_t *__wait, int flags);
 
 /*
  * The below macro ___wait_event() has an explicit shadow of the __ret
@@ -266,7 +269,7 @@ extern void init_wait_entry(wait_queue_t *__wait, int flags);
 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
 ({									\
 	__label__ __out;						\
-	wait_queue_t __wait;						\
+	wait_queue_entry_t __wait;					\
 	long __ret = ret;	/* explicit shadow */			\
 									\
 	init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
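
___wait_event() is the engine behind the wait_event*() family, which is how most callers consume it; a minimal caller sketch (hypothetical `my_wq` and `done`, not part of this commit):

	/* sleeping side */
	wait_event(my_wq, done);			/* uninterruptible */
	err = wait_event_interruptible(my_wq, done);	/* -ERESTARTSYS on signal */

	/* waking side */
	done = 1;
	wake_up(&my_wq);
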
@@ -620,8 +623,8 @@ do {									\
 	__ret;								\
 })
 
-extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
-extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
+extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
+extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
 
 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
 ({									\
@@ -967,17 +970,17 @@ do {									\
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
-void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
-int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+void prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);
+void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);
+void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait);
+long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout);
+int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);
+int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);
 
 #define DEFINE_WAIT_FUNC(name, function)				\
-	wait_queue_t name = {						\
+	wait_queue_entry_t name = {					\
 		.private	= current,				\
 		.func		= function,				\
 		.task_list	= LIST_HEAD_INIT((name).task_list),	\
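
DEFINE_WAIT() builds on DEFINE_WAIT_FUNC() with autoremove_wake_function(), so the entry is unlinked at wakeup time; the canonical loop, sketched with a hypothetical `my_wq` and `condition` (not part of this commit):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);	/* harmless if already auto-removed */
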
@@ -62,7 +62,7 @@ struct unix_sock {
 #define UNIX_GC_CANDIDATE	0
 #define UNIX_GC_MAYBE_CYCLE	1
 	struct socket_wq	peer_wq;
-	wait_queue_t		peer_wake;
+	wait_queue_entry_t	peer_wake;
 };
 
 static inline struct unix_sock *unix_sk(const struct sock *sk)
@@ -26,7 +26,7 @@
 #define AUTOFS_MIN_PROTO_VERSION	AUTOFS_PROTO_VERSION
 
 /*
- * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
+ * The wait_queue_entry_token (autofs_wqt_t) is part of a structure which is passed
  * back to the kernel via ioctl from userspace. On architectures where 32- and
  * 64-bit userspace binaries can be executed it's important that the size of
  * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
@@ -49,7 +49,7 @@ struct autofs_packet_hdr {
 
 struct autofs_packet_missing {
 	struct autofs_packet_hdr hdr;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	int len;
 	char name[NAME_MAX+1];
 };
@@ -108,7 +108,7 @@ enum autofs_notify {
 /* v4 multi expire (via pipe) */
 struct autofs_packet_expire_multi {
 	struct autofs_packet_hdr hdr;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	int len;
 	char name[NAME_MAX+1];
 };
@@ -123,7 +123,7 @@ union autofs_packet_union {
 /* autofs v5 common packet struct */
 struct autofs_v5_packet {
 	struct autofs_packet_hdr hdr;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	__u32 dev;
 	__u64 ino;
 	__u32 uid;
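
A simplified userspace sketch of how a daemon consumes this packet and acknowledges it by token via AUTOFS_IOC_READY (illustrative, not from this commit; `pipe_fd` and `root_fd` are hypothetical descriptors, and real daemons size the read by protocol version):

	struct autofs_v5_packet pkt;

	if (read(pipe_fd, &pkt, sizeof(pkt)) > 0) {
		/* ... perform the mount for pkt.name here ... */
		ioctl(root_fd, AUTOFS_IOC_READY, pkt.wait_queue_entry_token);
	}
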
@@ -1004,7 +1004,7 @@ struct wait_opts {
 	int __user		*wo_stat;
 	struct rusage __user	*wo_rusage;
 
-	wait_queue_t		child_wait;
+	wait_queue_entry_t	child_wait;
 	int			notask_error;
 };
@@ -1541,7 +1541,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
 	return 0;
 }
 
-static int child_wait_callback(wait_queue_t *wait, unsigned mode,
+static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
 				int sync, void *key)
 {
 	struct wait_opts *wo = container_of(wait, struct wait_opts,
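
child_wait_callback() is one instance of a recurring pattern in this commit: embed the entry in a larger struct, recover the container in the callback, and filter before waking. A generic sketch (`struct my_waiter` and `my_wake_fn` are hypothetical):

	struct my_waiter {
		wait_queue_entry_t	wait;
		void			*token;
	};

	static int my_wake_fn(wait_queue_entry_t *wait, unsigned mode,
			      int sync, void *key)
	{
		struct my_waiter *w = container_of(wait, struct my_waiter, wait);

		if (w->token != key)		/* wake only matching waiters */
			return 0;
		return default_wake_function(wait, mode, sync, key);
	}
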
@@ -225,7 +225,7 @@ struct futex_pi_state {
  * @requeue_pi_key:	the requeue_pi target futex key
  * @bitset:		bitset for the optional bitmasked wakeup
  *
- * We use this hashed waitqueue, instead of a normal wait_queue_t, so
+ * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
@@ -66,7 +66,7 @@ do_wait_for_common(struct completion *x,
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
 
-		__add_wait_queue_tail_exclusive(&x->wait, &wait);
+		__add_wait_queue_entry_tail_exclusive(&x->wait, &wait);
 		do {
 			if (signal_pending_state(state, current)) {
 				timeout = -ERESTARTSYS;
@@ -3687,7 +3687,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	exception_exit(prev_state);
 }
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
 	return try_to_wake_up(curr->private, mode, wake_flags);
@@ -21,7 +21,7 @@ void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_c
 
 EXPORT_SYMBOL(__init_waitqueue_head);
 
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait)
 {
 	unsigned long flags;
 
@@ -32,18 +32,18 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue);
 
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait)
 {
 	unsigned long flags;
 
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
-	__add_wait_queue_tail(q, wait);
+	__add_wait_queue_entry_tail(q, wait);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait)
 {
 	unsigned long flags;
 
@@ -66,7 +66,7 @@ EXPORT_SYMBOL(remove_wait_queue);
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, int wake_flags, void *key)
 {
-	wait_queue_t *curr, *next;
+	wait_queue_entry_t *curr, *next;
 
 	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
 		unsigned flags = curr->flags;
@@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
  * loads to move into the critical region).
  */
 void
-prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state)
 {
 	unsigned long flags;
 
@@ -184,20 +184,20 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 EXPORT_SYMBOL(prepare_to_wait);
 
 void
-prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
+prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state)
 {
 	unsigned long flags;
 
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
-		__add_wait_queue_tail(q, wait);
+		__add_wait_queue_entry_tail(q, wait);
 	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-void init_wait_entry(wait_queue_t *wait, int flags)
+void init_wait_entry(wait_queue_entry_t *wait, int flags)
 {
 	wait->flags = flags;
 	wait->private = current;
@@ -206,7 +206,7 @@ void init_wait_entry(wait_queue_t *wait, int flags)
 }
 EXPORT_SYMBOL(init_wait_entry);
 
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state)
 {
 	unsigned long flags;
 	long ret = 0;
@@ -230,7 +230,7 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	} else {
 		if (list_empty(&wait->task_list)) {
 			if (wait->flags & WQ_FLAG_EXCLUSIVE)
-				__add_wait_queue_tail(q, wait);
+				__add_wait_queue_entry_tail(q, wait);
 			else
 				__add_wait_queue(q, wait);
 		}
@@ -249,10 +249,10 @@ EXPORT_SYMBOL(prepare_to_wait_event);
  * condition in the caller before they add the wait
 * entry to the wake queue.
 */
-int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
 	if (likely(list_empty(&wait->task_list)))
-		__add_wait_queue_tail(wq, wait);
+		__add_wait_queue_entry_tail(wq, wait);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	if (signal_pending(current))
@@ -265,10 +265,10 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(do_wait_intr);
 
-int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
 	if (likely(list_empty(&wait->task_list)))
-		__add_wait_queue_tail(wq, wait);
+		__add_wait_queue_entry_tail(wq, wait);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	if (signal_pending(current))
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(do_wait_intr_irq);
  * the wait descriptor from the given waitqueue if still
  * queued.
  */
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait)
 {
 	unsigned long flags;
 
@@ -316,7 +316,7 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(finish_wait);
 
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	int ret = default_wake_function(wait, mode, sync, key);
 
@@ -351,7 +351,7 @@ static inline bool is_kthread_should_stop(void)
 * remove_wait_queue(&wq, &wait);
 *
 */
-long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
+long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout)
 {
 	set_current_state(mode); /* A */
 	/*
@@ -375,7 +375,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
 }
 EXPORT_SYMBOL(wait_woken);
 
-int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	/*
 	 * Although this function is called under waitqueue lock, LOCK
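
wait_woken() pairs with woken_wake_function() through WQ_FLAG_WOKEN to close the race between testing a condition and sleeping; a sketch of the documented idiom (hypothetical `my_wq` and `condition`, not part of this commit):

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&my_wq, &wait);
	while (!condition)		/* no set_current_state() needed here */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(&my_wq, &wait);
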
@@ -391,7 +391,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 }
 EXPORT_SYMBOL(woken_wake_function);
 
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
 	struct wait_bit_key *key = arg;
 	struct wait_bit_queue *wait_bit
@@ -534,7 +534,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
 	return bit_waitqueue(p, 0);
 }
 
-static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
+static int wake_atomic_t_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 				  void *arg)
 {
 	struct wait_bit_key *key = arg;
@@ -2864,11 +2864,11 @@ bool flush_work(struct work_struct *work)
 EXPORT_SYMBOL_GPL(flush_work);
 
 struct cwt_wait {
-	wait_queue_t		wait;
+	wait_queue_entry_t	wait;
 	struct work_struct	*work;
 };
 
-static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
 
mm/filemap.c (10 changed lines)
@@ -768,10 +768,10 @@ struct wait_page_key {
 struct wait_page_queue {
 	struct page *page;
 	int bit_nr;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 };
 
-static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
 	struct wait_page_key *key = arg;
 	struct wait_page_queue *wait_page
@@ -834,7 +834,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 		struct page *page, int bit_nr, int state, bool lock)
 {
 	struct wait_page_queue wait_page;
-	wait_queue_t *wait = &wait_page.wait;
+	wait_queue_entry_t *wait = &wait_page.wait;
 	int ret = 0;
 
 	init_wait(wait);
@@ -847,7 +847,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
 		if (likely(list_empty(&wait->task_list))) {
 			if (lock)
-				__add_wait_queue_tail_exclusive(q, wait);
+				__add_wait_queue_entry_tail_exclusive(q, wait);
 			else
 				__add_wait_queue(q, wait);
 			SetPageWaiters(page);
@@ -907,7 +907,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
-void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	unsigned long flags;
@@ -170,7 +170,7 @@ struct mem_cgroup_event {
 	 */
 	poll_table pt;
 	wait_queue_head_t *wqh;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct work_struct remove;
 };
 
@@ -1479,10 +1479,10 @@ static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
 	struct mem_cgroup *memcg;
-	wait_queue_t	wait;
+	wait_queue_entry_t	wait;
 };
 
-static int memcg_oom_wake_function(wait_queue_t *wait,
+static int memcg_oom_wake_function(wait_queue_entry_t *wait,
 	unsigned mode, int sync, void *arg)
 {
 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
@@ -3725,7 +3725,7 @@ static void memcg_event_remove(struct work_struct *work)
 *
 * Called with wqh->lock held and interrupts disabled.
 */
-static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
+static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
 			    int sync, void *key)
 {
 	struct mem_cgroup_event *event =
@@ -312,7 +312,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
 	void *element;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	gfp_t gfp_temp;
 
 	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
@@ -1902,7 +1902,7 @@ alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 * entry unconditionally - even if something else had already woken the
 * target.
 */
-static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	int ret = default_wake_function(wait, mode, sync, key);
 	list_del_init(&wait->task_list);
@@ -95,7 +95,7 @@ enum {
 
 struct p9_poll_wait {
 	struct p9_conn *conn;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	wait_queue_head_t *wait_addr;
 };
 
@@ -522,7 +522,7 @@ static void p9_write_work(struct work_struct *work)
 	clear_bit(Wworksched, &m->wsched);
 }
 
-static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
+static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 {
 	struct p9_poll_wait *pwait =
 		container_of(wait, struct p9_poll_wait, wait);
@@ -484,7 +484,7 @@ static int bnep_session(void *arg)
 	struct net_device *dev = s->dev;
 	struct sock *sk = s->sock->sk;
 	struct sk_buff *skb;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	BT_DBG("");
 
@@ -280,7 +280,7 @@ static int cmtp_session(void *arg)
 	struct cmtp_session *session = arg;
 	struct sock *sk = session->sock->sk;
 	struct sk_buff *skb;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	BT_DBG("session %p", session);
 
@@ -1244,7 +1244,7 @@ static void hidp_session_run(struct hidp_session *session)
 static int hidp_session_thread(void *arg)
 {
 	struct hidp_session *session = arg;
-	wait_queue_t ctrl_wait, intr_wait;
+	wait_queue_entry_t ctrl_wait, intr_wait;
 
 	BT_DBG("session %p", session);
 
@@ -68,7 +68,7 @@ static inline int connection_based(struct sock *sk)
 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
-static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
+static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
 				  void *key)
 {
 	unsigned long bits = (unsigned long)key;
@@ -343,7 +343,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue
 *
- * In order to propagate a wake up, a wait_queue_t of the client
+ * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
@@ -352,7 +352,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
 * was relayed.
 */
 
-static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 				      void *key)
 {
 	struct unix_sock *u;
@@ -1577,7 +1577,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
 		struct snd_ctl_event ev;
 		struct snd_kctl_event *kev;
 		while (list_empty(&ctl->events)) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				err = -EAGAIN;
 				goto __end_lock;
@@ -85,7 +85,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
 	int major = imajor(inode);
 	struct snd_hwdep *hw;
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (major == snd_major) {
 		hw = snd_lookup_minor_data(iminor(inode),
@@ -989,7 +989,7 @@ EXPORT_SYMBOL(snd_card_file_remove);
 */
 int snd_power_wait(struct snd_card *card, unsigned int power_state)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int result = 0;
 
 	/* fastpath */
@@ -1554,7 +1554,7 @@ static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size)
 	ssize_t result = 0;
 	snd_pcm_state_t state;
 	long res;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	runtime = substream->runtime;
 	init_waitqueue_entry(&wait, current);
@@ -2387,7 +2387,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
 	struct snd_pcm_oss_file *pcm_oss_file;
 	struct snd_pcm_oss_setup setup[2];
 	int nonblock;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	err = nonseekable_open(inode, file);
 	if (err < 0)
@@ -1904,7 +1904,7 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int err = 0;
 	snd_pcm_uframes_t avail = 0;
 	long wait_time, tout;
@@ -1652,7 +1652,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 	struct snd_card *card;
 	struct snd_pcm_runtime *runtime;
 	struct snd_pcm_substream *s;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int result = 0;
 	int nonblock = 0;
 
@@ -2353,7 +2353,7 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
 {
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (pcm == NULL) {
 		err = -ENODEV;
@@ -368,7 +368,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
 	int err;
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_file *rawmidi_file = NULL;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK))
 		return -EINVAL;		/* invalid combination */
@@ -1002,7 +1002,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
 	while (count > 0) {
 		spin_lock_irq(&runtime->lock);
 		while (!snd_rawmidi_ready(substream)) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				spin_unlock_irq(&runtime->lock);
 				return result > 0 ? result : -EAGAIN;
@@ -1306,7 +1306,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
 	while (count > 0) {
 		spin_lock_irq(&runtime->lock);
 		while (!snd_rawmidi_ready_append(substream, count)) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			if (file->f_flags & O_NONBLOCK) {
 				spin_unlock_irq(&runtime->lock);
 				return result > 0 ? result : -EAGAIN;
@@ -1338,7 +1338,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
 	if (file->f_flags & O_DSYNC) {
 		spin_lock_irq(&runtime->lock);
 		while (runtime->avail != runtime->buffer_size) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			unsigned int last_avail = runtime->avail;
 			init_waitqueue_entry(&wait, current);
 			add_wait_queue(&runtime->sleep, &wait);
@@ -179,7 +179,7 @@ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
 {
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (snd_BUG_ON(!f))
 		return -EINVAL;
@@ -227,7 +227,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
 	int err = -EAGAIN;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (pool == NULL)
 		return -EINVAL;
@@ -1964,7 +1964,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 	spin_lock_irq(&tu->qlock);
 	while ((long)count - result >= unit) {
 		while (!tu->qused) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				err = -EAGAIN;
@@ -1782,7 +1782,7 @@ wavefront_should_cause_interrupt (snd_wavefront_t *dev,
 				  int val, int port, unsigned long timeout)
 
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	init_waitqueue_entry(&wait, current);
 	spin_lock_irq(&dev->irq_lock);
@@ -239,7 +239,7 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int
 	struct mixart_msg resp;
 	u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	long timeout;
 
 	init_waitqueue_entry(&wait, current);
@@ -284,7 +284,7 @@ int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
 				   struct mixart_msg *request, u32 notif_event)
 {
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	long timeout;
 
 	if (snd_BUG_ON(!notif_event))
@@ -781,7 +781,7 @@ static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *su
 
 static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int loops = 4;
 
 	while (loops-- > 0) {
@@ -184,7 +184,7 @@ int __attribute__((weak)) kvm_arch_set_irq_inatomic(
 * Called with wqh->lock held and interrupts disabled
 */
 static int
-irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(wait, struct kvm_kernel_irqfd, wait);
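
irqfd is the poll-table flavor of the same pattern: the entry is given a custom wake function, then registered on whatever wait queue head the file's poll() hands back. A generic sketch under assumed names (`struct my_consumer`, `my_wakeup`, and `my_ptable_queue_proc` are hypothetical, not from this commit):

	static void my_ptable_queue_proc(struct file *file,
					 wait_queue_head_t *wqh, poll_table *pt)
	{
		struct my_consumer *c = container_of(pt, struct my_consumer, pt);

		add_wait_queue(wqh, &c->wait);	/* c->wait: wait_queue_entry_t */
	}

	/* setup: wake callback + poll table, then hook into the file */
	init_waitqueue_func_entry(&c->wait, my_wakeup);
	init_poll_funcptr(&c->pt, my_ptable_queue_proc);
	events = f.file->f_op->poll(f.file, &c->pt);
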