locking/rtmutex: Extend the rtmutex core to support ww_mutex

Add a ww acquire context pointer to the waiter and various functions and
add the ww_mutex related invocations to the proper spots in the locking
code, similar to the mutex based variant.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.966139174@linutronix.de
This commit is contained in:
parent 2408f7a378
commit add461325e

4 changed files with 114 additions and 13 deletions
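
For orientation before the diff: callers are not affected by this series; the ww_mutex API stays as it is, only the rtmutex-based implementation underneath is new. Below is a minimal sketch of the acquire/backoff pattern that implementation has to support. The ww_* calls are the existing <linux/ww_mutex.h> API; demo_ww_class, demo_lock_pair and the two-lock scenario are made up for illustration.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);	/* hypothetical lock class */

/* Acquire two ww_mutexes of the same class in arbitrary order. */
static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);

	/* The first lock cannot return -EDEADLK: the context holds nothing yet. */
	ww_mutex_lock(a, &ctx);
	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/*
		 * An older context won the conflict. Drop what we hold,
		 * sleep until the contended lock is ours, then loop to
		 * retake the other lock.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);	/* we now hold what 'b' pointed to */
	}
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
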
				
			
kernel/locking/rtmutex.c
@@ -17,9 +17,44 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/wake_q.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
+#ifndef WW_RT
+# define build_ww_mutex()	(false)
+# define ww_container_of(rtm)	NULL
+
+static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
+					struct rt_mutex *lock,
+					struct ww_acquire_ctx *ww_ctx)
+{
+	return 0;
+}
+
+static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
+					    struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
+					  struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+					struct rt_mutex_waiter *waiter,
+					struct ww_acquire_ctx *ww_ctx)
+{
+	return 0;
+}
+
+#else
+# define build_ww_mutex()	(true)
+# define ww_container_of(rtm)	container_of(rtm, struct ww_mutex, base)
+# include "ww_mutex.h"
+#endif
+
 /*
  * lock->owner state tracking:
  *
@@ -308,7 +343,28 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 
 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
 {
-	return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+	struct rt_mutex_waiter *aw = __node_2_waiter(a);
+	struct rt_mutex_waiter *bw = __node_2_waiter(b);
+
+	if (rt_mutex_waiter_less(aw, bw))
+		return 1;
+
+	if (!build_ww_mutex())
+		return 0;
+
+	if (rt_mutex_waiter_less(bw, aw))
+		return 0;
+
+	/* NOTE: relies on waiter->ww_ctx being set before insertion */
+	if (aw->ww_ctx) {
+		if (!bw->ww_ctx)
+			return 1;
+
+		return (signed long)(aw->ww_ctx->stamp -
+				     bw->ww_ctx->stamp) < 0;
+	}
+
+	return 0;
 }
 
 static __always_inline void
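
A note on the new tail of __waiter_less(): waiters of equal rtmutex priority are ordered by the age of their acquire context. ww_acquire_ctx::stamp is drawn from an increasing counter, and the subtract-then-test-sign idiom keeps "older than" correct across counter wraparound. A standalone demonstration of the idiom (plain userspace C, not kernel code):

#include <assert.h>
#include <limits.h>

/*
 * "a was stamped before b", for stamps drawn from a free-running
 * counter. A plain "a < b" breaks once the counter wraps; testing the
 * sign of the difference keeps working as long as the two stamps are
 * less than half the counter range apart.
 */
static int stamp_older(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	assert(stamp_older(1, 2));		/* the unsurprising case */
	assert(stamp_older(ULONG_MAX, 2));	/* ULONG_MAX was stamped just before the wrap */
	assert(!stamp_older(2, ULONG_MAX));
	return 0;
}
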
@@ -961,6 +1017,7 @@ try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 					   struct rt_mutex_waiter *waiter,
 					   struct task_struct *task,
+					   struct ww_acquire_ctx *ww_ctx,
 					   enum rtmutex_chainwalk chwalk)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
@@ -996,6 +1053,16 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 
 	raw_spin_unlock(&task->pi_lock);
 
+	if (build_ww_mutex() && ww_ctx) {
+		struct rt_mutex *rtm;
+
+		/* Check whether the waiter should back out immediately */
+		rtm = container_of(lock, struct rt_mutex, rtmutex);
+		res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
+		if (res)
+			return res;
+	}
+
 	if (!owner)
 		return 0;
 
@@ -1281,6 +1348,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
 /**
  * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
+ * @ww_ctx:		 WW mutex context pointer
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
  * @timeout:		 the pre-initialized and started timer, or NULL for none
@@ -1289,10 +1357,12 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  * Must be called with lock->wait_lock held and interrupts disabled
  */
 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   struct ww_acquire_ctx *ww_ctx,
 					   unsigned int state,
 					   struct hrtimer_sleeper *timeout,
 					   struct rt_mutex_waiter *waiter)
 {
+	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
 	int ret = 0;
 
 	for (;;) {
@@ -1309,6 +1379,12 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			break;
 		}
 
+		if (build_ww_mutex() && ww_ctx) {
+			ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
+			if (ret)
+				break;
+		}
+
 		raw_spin_unlock_irq(&lock->wait_lock);
 
 		schedule();
@@ -1331,6 +1407,9 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	if (res != -EDEADLOCK || detect_deadlock)
 		return;
 
+	if (build_ww_mutex() && w->ww_ctx)
+		return;
+
 	/*
 	 * Yell loudly and stop the task right here.
 	 */
@@ -1344,31 +1423,46 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 /**
  * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
  * @lock:	The rtmutex to block lock
+ * @ww_ctx:	WW mutex context pointer
  * @state:	The task state for sleeping
  * @chwalk:	Indicator whether full or partial chainwalk is requested
  * @waiter:	Initializer waiter for blocking
  */
 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       struct ww_acquire_ctx *ww_ctx,
 				       unsigned int state,
 				       enum rtmutex_chainwalk chwalk,
 				       struct rt_mutex_waiter *waiter)
 {
+	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+	struct ww_mutex *ww = ww_container_of(rtm);
 	int ret;
 
 	lockdep_assert_held(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL))
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		if (build_ww_mutex() && ww_ctx) {
+			__ww_mutex_check_waiters(rtm, ww_ctx);
+			ww_mutex_lock_acquired(ww, ww_ctx);
+		}
 		return 0;
+	}
 
 	set_current_state(state);
 
-	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
 	if (likely(!ret))
-		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
+		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
 
-	if (unlikely(ret)) {
+	if (likely(!ret)) {
+		/* acquired the lock */
+		if (build_ww_mutex() && ww_ctx) {
+			if (!ww_ctx->is_wait_die)
+				__ww_mutex_check_waiters(rtm, ww_ctx);
+			ww_mutex_lock_acquired(ww, ww_ctx);
+		}
+	} else {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, waiter);
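
The ww_ctx->is_wait_die check in the success path above distinguishes the two classic deadlock-avoidance schemes ww_mutex supports, which the rtmutex core must now preserve: under Wait-Die, a younger context conflicting with an older one backs off with -EDEADLK and retries (that is what __ww_mutex_check_kill() reports in the wait loop further up); under Wound-Wait, an older requester instead "wounds" the younger lock holder, forcing it to drop the lock. A toy model of who yields in each scheme (illustrative plain C, not the kernel's implementation; "older" means a smaller stamp, compared as in __waiter_less() above):

#include <stdbool.h>

enum outcome { REQUESTER_WAITS, REQUESTER_BACKS_OFF, HOLDER_IS_WOUNDED };

/* Conflict between a requester and the current holder of a lock. */
static enum outcome resolve(unsigned long req_stamp,
			    unsigned long holder_stamp, bool wait_die)
{
	/* wraparound-safe "requester is younger than holder" */
	bool req_younger = (long)(req_stamp - holder_stamp) > 0;

	if (wait_die)
		/* Wait-Die: older waits, younger dies and retries. */
		return req_younger ? REQUESTER_BACKS_OFF : REQUESTER_WAITS;

	/* Wound-Wait: younger waits, older wounds the younger holder. */
	return req_younger ? REQUESTER_WAITS : HOLDER_IS_WOUNDED;
}
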
@@ -1383,14 +1477,17 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 }
 
 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     struct ww_acquire_ctx *ww_ctx,
 					     unsigned int state)
 {
 	struct rt_mutex_waiter waiter;
 	int ret;
 
 	rt_mutex_init_waiter(&waiter);
+	waiter.ww_ctx = ww_ctx;
 
-	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
+	ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
+				  &waiter);
 
 	debug_rt_mutex_free_waiter(&waiter);
 	return ret;
@@ -1399,9 +1496,11 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
 /*
  * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
  * @lock:	The rtmutex to block lock
+ * @ww_ctx:	WW mutex context pointer
  * @state:	The task state for sleeping
  */
 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     struct ww_acquire_ctx *ww_ctx,
 				     unsigned int state)
 {
 	unsigned long flags;
@@ -1416,7 +1515,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * irqsave/restore variants.
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	ret = __rt_mutex_slowlock_locked(lock, state);
+	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return ret;
@@ -1428,7 +1527,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state);
+	return rt_mutex_slowlock(lock, NULL, state);
 }
 #endif /* RT_MUTEX_BUILD_MUTEX */
 
@@ -1455,7 +1554,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	/* Save current state and set state to TASK_RTLOCK_WAIT */
 	current_save_and_set_rtlock_wait_state();
 
-	task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
 
 	for (;;) {
 		/* Try to acquire the lock again */
kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
 		return 1;
 
 	/* We enforce deadlock detection for futexes */
-	ret = task_blocks_on_rt_mutex(lock, waiter, task,
+	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
 				      RT_MUTEX_FULL_CHAINWALK);
 
 	if (ret && !rt_mutex_owner(lock)) {
@@ -343,7 +343,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
kernel/locking/rtmutex_common.h
@@ -28,6 +28,7 @@
  * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
  * @prio:		Priority of the waiter
  * @deadline:		Deadline of the waiter if applicable
+ * @ww_ctx:		WW context pointer
  */
 struct rt_mutex_waiter {
 	struct rb_node		tree_entry;
@@ -37,6 +38,7 @@ struct rt_mutex_waiter {
 	unsigned int		wake_state;
 	int			prio;
 	u64			deadline;
+	struct ww_acquire_ctx	*ww_ctx;
 };
 
 /**
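
With the two hunks above applied, struct rt_mutex_waiter reads as follows. The fields not visible in the hunk context (pi_tree_entry, task, lock) are reproduced from the kernel source of this era as a best-effort reconstruction, not part of the diff:

struct rt_mutex_waiter {
	struct rb_node		tree_entry;	/* node in lock->waiters */
	struct rb_node		pi_tree_entry;	/* node in owner's pi_waiters */
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	int			prio;
	u64			deadline;
	struct ww_acquire_ctx	*ww_ctx;	/* new: NULL for non-ww waiters */
};
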
kernel/locking/rwsem.c
@@ -1360,7 +1360,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	__rt_mutex_lock(rtm, state)
 
 #define rwbase_rtmutex_slowlock_locked(rtm, state)	\
-	__rt_mutex_slowlock_locked(rtm, state)
+	__rt_mutex_slowlock_locked(rtm, NULL, state)
 
 #define rwbase_rtmutex_unlock(rtm)			\
 	__rt_mutex_unlock(rtm)