mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 02:30:34 +02:00

	A circular lock dependency splat has been seen involving down_trylock():
  ======================================================
  WARNING: possible circular locking dependency detected
  6.12.0-41.el10.s390x+debug
  ------------------------------------------------------
  dd/32479 is trying to acquire lock:
  0015a20accd0d4f8 ((console_sem).lock){-.-.}-{2:2}, at: down_trylock+0x26/0x90
  but task is already holding lock:
  000000017e461698 (&zone->lock){-.-.}-{2:2}, at: rmqueue_bulk+0xac/0x8f0
  the existing dependency chain (in reverse order) is:
  -> #4 (&zone->lock){-.-.}-{2:2}:
  -> #3 (hrtimer_bases.lock){-.-.}-{2:2}:
  -> #2 (&rq->__lock){-.-.}-{2:2}:
  -> #1 (&p->pi_lock){-.-.}-{2:2}:
  -> #0 ((console_sem).lock){-.-.}-{2:2}:
The console_sem -> pi_lock dependency comes from calling try_to_wake_up()
while holding the console_sem raw_spinlock. The dependency can be broken
by using wake_q to do the wakeup instead of calling try_to_wake_up()
under the console_sem lock. This also makes the semaphore's raw_spinlock
a terminal lock, i.e. no further locks are acquired while it is held.
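In sketch form, the resulting up() path queues the waiter while holding the
lock and issues the wakeup only after dropping it (a minimal illustration of
the wake_q idiom; the concrete code is the up()/__up() pair in
kernel/locking/semaphore.c shown further below):

	/* body of the new up(), abridged */
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem, &wake_q);		/* only queues the waiter on wake_q */
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);		/* actual wakeup, outside sem->lock */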
The hrtimer_bases.lock is a raw_spinlock while zone->lock is a
spinlock. The hrtimer_bases.lock -> zone->lock dependency happens via
the debug_objects_fill_pool() helper function in the debugobjects code.
  -> #4 (&zone->lock){-.-.}-{2:2}:
         __lock_acquire+0xe86/0x1cc0
         lock_acquire.part.0+0x258/0x630
         lock_acquire+0xb8/0xe0
         _raw_spin_lock_irqsave+0xb4/0x120
         rmqueue_bulk+0xac/0x8f0
         __rmqueue_pcplist+0x580/0x830
         rmqueue_pcplist+0xfc/0x470
         rmqueue.isra.0+0xdec/0x11b0
         get_page_from_freelist+0x2ee/0xeb0
         __alloc_pages_noprof+0x2c2/0x520
         alloc_pages_mpol_noprof+0x1fc/0x4d0
         alloc_pages_noprof+0x8c/0xe0
         allocate_slab+0x320/0x460
         ___slab_alloc+0xa58/0x12b0
         __slab_alloc.isra.0+0x42/0x60
         kmem_cache_alloc_noprof+0x304/0x350
         fill_pool+0xf6/0x450
         debug_object_activate+0xfe/0x360
         enqueue_hrtimer+0x34/0x190
         __run_hrtimer+0x3c8/0x4c0
         __hrtimer_run_queues+0x1b2/0x260
         hrtimer_interrupt+0x316/0x760
         do_IRQ+0x9a/0xe0
         do_irq_async+0xf6/0x160
Normally a raw_spinlock to spinlock dependency is not legitimate and will
be flagged when CONFIG_PROVE_RAW_LOCK_NESTING is enabled, but
debug_objects_fill_pool() is an exception: it explicitly allows this
dependency on non-PREEMPT_RT kernels without triggering a
PROVE_RAW_LOCK_NESTING lockdep splat. As a result, this dependency is
legitimate and not a bug.
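For context, the nesting rule being relaxed here can be illustrated as
follows (hypothetical locks, not code from this patch): on PREEMPT_RT a
spinlock_t becomes a sleeping rtmutex, so acquiring one while holding a
raw_spinlock_t would mean sleeping in a non-preemptible section, which is
exactly what CONFIG_PROVE_RAW_LOCK_NESTING flags:

	raw_spin_lock(&some_raw_lock);
	spin_lock(&some_spinlock);	/* flagged by PROVE_RAW_LOCK_NESTING:   */
					/* a spinlock_t may sleep on PREEMPT_RT */
	spin_unlock(&some_spinlock);
	raw_spin_unlock(&some_raw_lock);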
That said, the semaphore is the only locking primitive left that still uses
try_to_wake_up() to do wakeups inside its critical section; all the other
locking primitives have already been migrated to wake_q and do the wakeup
outside of the critical section. It is also possible that other circular
locking dependencies involving printk/console_sem, or other existing or new
semaphores, are lurking somewhere and may show up in the future. Let's just
do the migration to wake_q now and avoid headaches like this.
Reported-by: yzbot+ed801a886dfdbfe7136d@syzkaller.appspotmail.com
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250307232717.1759087-3-boqun.feng@gmail.com
		
	
			
		
			
				
	
	
		
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void __sched down(struct semaphore *sem)
{
	unsigned long flags;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int __sched down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int __sched down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int __sched down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int __sched down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void __sched up(struct semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem, &wake_q);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	int ret;

	trace_contention_begin(sem, 0);
	ret = ___down_common(sem, state, timeout);
	trace_contention_end(sem, ret);

	return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem,
				  struct wake_q_head *wake_q)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_q_add(wake_q, waiter->task);
}
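For reference, typical use of this API from other kernel code looks roughly
like the following (a hypothetical sketch with made-up names, not part of
semaphore.c; it also shows the inverted down_trylock() return convention
called out in the kerneldoc above, where non-zero means the semaphore was
NOT acquired):

#include <linux/errno.h>
#include <linux/semaphore.h>

static struct semaphore my_sem;		/* hypothetical example semaphore */

static int my_example_init(void)
{
	sema_init(&my_sem, 1);		/* count of 1: acts as a binary semaphore */
	return 0;
}

static int my_example_sleeping_op(void)
{
	if (down_interruptible(&my_sem))
		return -EINTR;		/* a signal arrived while waiting */
	/* ... exclusive work ... */
	up(&my_sem);
	return 0;
}

static int my_example_atomic_op(void)
{
	if (down_trylock(&my_sem))	/* non-zero: NOT acquired, do not proceed */
		return -EAGAIN;
	/* ... exclusive work ... */
	up(&my_sem);
	return 0;
}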