With DEBUG_RT_MUTEXES enabled, the fast path rt_mutex_cmpxchg_acquire() always fails and all lock operations take the slow path.

Provide a new inline helper, rt_mutex_try_acquire(), which maps to rt_mutex_cmpxchg_acquire() in the non-debug case. In the debug case it invokes rt_mutex_slowtrylock(), which can acquire a non-contended rtmutex under full debug coverage.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230908162254.999499-3-bigeasy@linutronix.de
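The changelog describes the helper's two shapes but the hunk itself is not shown on this page, so here is a minimal sketch of what it could look like in rtmutex.c. The return type, the forward declaration and the exact placement are assumptions; rt_mutex_cmpxchg_acquire(), rt_mutex_slowtrylock() and struct rt_mutex_base are the existing rtmutex names the changelog refers to.

/* Sketch only: shape inferred from the changelog, not the verbatim hunk. */
#ifdef CONFIG_DEBUG_RT_MUTEXES
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);

static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{
	/*
	 * Debug case: the cmpxchg fast path always fails, but the slow
	 * trylock can still take a non-contended lock while keeping full
	 * debug coverage.
	 */
	return rt_mutex_slowtrylock(lock);
}
#else /* !CONFIG_DEBUG_RT_MUTEXES */
static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{
	/* Non-debug case: plain fast-path cmpxchg of NULL -> current. */
	return rt_mutex_cmpxchg_acquire(lock, NULL, current);
}
#endif

One caller converted to the new helper is kernel/locking/ww_rt_mutex.c below, where __ww_rt_mutex_lock() uses rt_mutex_try_acquire() for its fast path before falling back to rt_mutex_slowlock().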
// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"

int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);

	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);
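For context, a hedged usage sketch of the ww_mutex API that this file implements for PREEMPT_RT. demo_ww_class, struct demo_obj and demo_lock_pair() are made-up names for illustration; ww_acquire_init(), ww_mutex_lock_slow(), ww_acquire_done() and ww_acquire_fini() are the standard primitives from <linux/ww_mutex.h>.

#include <linux/ww_mutex.h>

/* Hypothetical lock class and object, for illustration only. */
static DEFINE_WW_CLASS(demo_ww_class);

struct demo_obj {
	struct ww_mutex lock;
	/* ... payload ... */
};

/* Acquire two objects' locks without risking an ABBA deadlock. */
static void demo_lock_pair(struct demo_obj *a, struct demo_obj *b)
{
	struct demo_obj *first = a, *second = b;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	ret = ww_mutex_lock(&first->lock, &ctx);	/* first lock cannot deadlock */
	while ((ret = ww_mutex_lock(&second->lock, &ctx)) == -EDEADLK) {
		/*
		 * We lost the wound/wait arbitration: drop the lock we
		 * hold, sleep on the contended one, then retry with the
		 * roles swapped.
		 */
		ww_mutex_unlock(&first->lock);
		ww_mutex_lock_slow(&second->lock, &ctx);
		swap(first, second);
	}
	ww_acquire_done(&ctx);

	/* ... both locks held: operate on a and b ... */

	ww_mutex_unlock(&second->lock);
	ww_mutex_unlock(&first->lock);
	ww_acquire_fini(&ctx);
}

The back-off branch relies on the wounded-flag reset visible in the file above: once all locks are dropped, ww_ctx->acquired is zero, so the next lock attempt clears ww_ctx->wounded and the context may try again.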