mirror of https://github.com/torvalds/linux.git
hrtimer: fix rq->lock inversion (again)

It appears I inadvertently introduced rq->lock recursion to the
hrtimer_start() path when I delegated running already expired timers to
softirq context.

This patch fixes it by introducing a __hrtimer_start_range_ns() method
that will not use raise_softirq_irqoff() but __raise_softirq_irqoff(),
which avoids the wakeup.

It then also changes schedule() to check for pending softirqs and do the
wakeup then; I'm not quite sure I like this last bit, nor am I convinced
it's really needed.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
LKML-Reference: <20090313112301.096138802@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 7f1e2ca9f0
parent 7bee946358

5 changed files with 52 additions and 25 deletions
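The scenario the changelog describes can be illustrated with a small userspace model. The sketch below is not part of the patch; rq_lock, wake_ksoftirqd() and toy_raise_softirq() are made-up stand-ins for rq->lock, the ksoftirqd wakeup done by raise_softirq_irqoff(), and the pending-only __raise_softirq_irqoff() path. With the wakeup argument set to 0, a caller that already holds the lock only marks the softirq pending instead of re-taking the lock through the wakeup.

/*
 * Toy userspace model of the inversion described above -- not kernel code.
 * rq_lock stands in for rq->lock, wake_ksoftirqd() for the task wakeup that
 * raise_softirq_irqoff() can trigger, and the wakeup flag mirrors the new
 * __hrtimer_start_range_ns(..., wakeup) parameter.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t rq_lock;
static int softirq_pending;

static void wake_ksoftirqd(void)
{
	/* Waking a task needs the runqueue lock -- hence the inversion. */
	int err = pthread_mutex_lock(&rq_lock);

	if (err)
		printf("wakeup with rq_lock held: %s\n", strerror(err));
	else
		pthread_mutex_unlock(&rq_lock);
}

static void toy_raise_softirq(int wakeup)
{
	softirq_pending = 1;		/* __raise_softirq_irqoff() analogue */
	if (wakeup)
		wake_ksoftirqd();	/* raise_softirq_irqoff() analogue */
}

int main(void)
{
	pthread_mutexattr_t attr;

	/* Error-checking mutex so the self-deadlock reports EDEADLK. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&rq_lock, &attr);

	pthread_mutex_lock(&rq_lock);	/* caller already holds "rq->lock" */
	toy_raise_softirq(1);		/* old path: relocks (here: EDEADLK) */
	toy_raise_softirq(0);		/* fixed path: only marks pending */
	pthread_mutex_unlock(&rq_lock);

	printf("softirq_pending=%d, wakeup deferred to a safe point\n",
	       softirq_pending);
	return 0;
}

Built with cc -pthread, the wakeup=1 call reports the recursive lock attempt (EDEADLK) instead of hanging, which is the point where the kernel would deadlock on rq->lock.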
				
			
include/linux/hrtimer.h
@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
 			 const enum hrtimer_mode mode);
 extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 			unsigned long range_ns, const enum hrtimer_mode mode);
+extern int
+__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+			 unsigned long delta_ns,
+			 const enum hrtimer_mode mode, int wakeup);
+
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
include/linux/interrupt.h
@@ -294,6 +294,7 @@ extern void softirq_init(void);
 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
+extern void wakeup_softirqd(void);
 
 /* This is the worklist that queues up per-cpu softirq work.
  *
kernel/hrtimer.c
@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
  * and expiry check is done in the hrtimer_interrupt or in the softirq.
  */
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
+					    struct hrtimer_clock_base *base,
+					    int wakeup)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-		spin_unlock(&base->cpu_base->lock);
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-		spin_lock(&base->cpu_base->lock);
+		if (wakeup) {
+			spin_unlock(&base->cpu_base->lock);
+			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			spin_lock(&base->cpu_base->lock);
+		} else
+			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
+					    struct hrtimer_clock_base *base,
+					    int wakeup)
 {
 	return 0;
 }
@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 	return 0;
 }
 
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer:	the timer to be added
- * @tim:	expiry time
- * @delta_ns:	"slack" range for the timer
- * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int
-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
-			const enum hrtimer_mode mode)
+int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+		unsigned long delta_ns, const enum hrtimer_mode mode,
+		int wakeup)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
 	 * XXX send_remote_softirq() ?
 	 */
 	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-		hrtimer_enqueue_reprogram(timer, new_base);
+		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
 
 	unlock_hrtimer_base(timer, &flags);
 
 	return ret;
 }
 
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer:	the timer to be added
+ * @tim:	expiry time
+ * @delta_ns:	"slack" range for the timer
+ * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+		unsigned long delta_ns, const enum hrtimer_mode mode)
+{
+	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
+}
+EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+
 /**
@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
 int
 hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 {
-	return hrtimer_start_range_ns(timer, tim, 0, mode);
+	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);
 
kernel/sched.c
@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
+		unsigned long delta;
+		ktime_t soft, hard;
+
 		if (hrtimer_active(&rt_b->rt_period_timer))
 			break;
 
 		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-		hrtimer_start_expires(&rt_b->rt_period_timer,
-				HRTIMER_MODE_ABS);
+
+		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+		delta = ktime_to_ns(ktime_sub(hard, soft));
+		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+				HRTIMER_MODE_ABS, 0);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
@@ -1146,7 +1153,8 @@ static __init void init_hrtick(void)
  */
 static void hrtick_start(struct rq *rq, u64 delay)
 {
-	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+			HRTIMER_MODE_REL, 0);
 }
 
 static inline void init_hrtick(void)
kernel/softirq.c
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static inline void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __get_cpu_var(ksoftirqd);