mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	When a prospective writer takes the qrwlock locking slowpath due to the lock being held, it attempts to cmpxchg the wmode field from 0 to _QW_WAITING so that concurrent lockers also take the slowpath and queue on the spinlock accordingly, allowing the lockers to drain. Unfortunately, this isn't fair, because a fastpath writer that comes in after the lock is made available but before the _QW_WAITING flag is set can effectively jump the queue. If there is a steady stream of prospective writers, then the waiter will be held off indefinitely. This patch restores fairness by separating _QW_WAITING and _QW_LOCKED into two distinct fields: _QW_LOCKED continues to occupy the bottom byte of the lockword so that it can be cleared unconditionally when unlocking, but _QW_WAITING now occupies what used to be the bottom bit of the reader count. This then forces the slow-path for concurrent lockers. Tested-by: Waiman Long <longman@redhat.com> Tested-by: Jeremy Linton <jeremy.linton@arm.com> Tested-by: Adam Wallis <awallis@codeaurora.org> Tested-by: Jan Glauber <jglauber@cavium.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Acked-by: Peter Zijlstra <peterz@infradead.org> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Jeremy.Linton@arm.com Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1507810851-306-6-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
		
			
				
	
	
		
			92 lines
		
	
	
	
		
			2.8 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			92 lines
		
	
	
	
		
			2.8 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 *
 * Readers come here when they cannot get the lock without waiting (i.e.
 * the fast-path increment of the reader count found the lock write-held
 * or write-contended).  Process-context readers queue on @lock->wait_lock
 * for FIFO fairness; interrupt-context readers bypass the queue entirely
 * to avoid deadlocking against a writer they may have interrupted.
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.  Note the condition ignores
		 * _QW_WAITING on purpose: only an actually-held write lock
		 * (_QW_LOCKED) blocks an in-interrupt reader.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	/*
	 * Drop the reader count we speculatively added before coming here
	 * (presumably the fast path's _QR_BIAS -- confirm against
	 * queued_read_lock() in asm-generic/qrwlock.h); while queued we
	 * must not be counted, or a waiting writer could never drain us.
	 */
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue.  wait_lock serializes all
	 * queued lockers, so only the queue head spins on ->cnts below.
	 */
	arch_spin_lock(&lock->wait_lock);
	/* Re-take our reader count now that we are the queue head. */
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Called when the write-lock fast path (cmpxchg of ->cnts from 0 to
 * _QW_LOCKED) failed.  The writer queues on @lock->wait_lock, then sets
 * _QW_WAITING so that new readers take their slowpath and queue behind
 * it instead of starving the writer, and finally spins until all
 * existing readers/writers have drained before claiming _QW_LOCKED.
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/*
	 * Set the waiting flag to notify readers that a writer is pending.
	 * A plain add is sufficient: we hold wait_lock, so no other queued
	 * locker can set or clear _QW_WAITING concurrently.
	 */
	atomic_add(_QW_WAITING, &lock->cnts);

	/*
	 * When no more readers or writers, set the locked flag.
	 * The cmpxchg may be relaxed because the preceding
	 * atomic_cond_read_acquire() already provides the ACQUIRE
	 * ordering for the critical section; on cmpxchg failure (a
	 * reader slipped its count in between the read and the cmpxchg)
	 * we simply go back to waiting.
	 */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);