	sched/rt: Make rt_rq->pushable_tasks updates drive rto_mask
Sebastian noted that the rto_push_work IRQ work can be queued for a CPU
that has an empty pushable_tasks list, which means nothing useful will be
done in the IPI other than queue the work for the next CPU on the rto_mask.
rto_push_irq_work_func() only operates on tasks in the pushable_tasks list,
but the conditions for that irq_work to be queued (and for a CPU to be
added to the rto_mask) rely on rt_rq->rt_nr_migratory instead.
rt_nr_migratory is increased whenever an RT task entity is enqueued and it has
nr_cpus_allowed > 1. Unlike the pushable_tasks list, rt_nr_migratory includes an
rt_rq's current task. This means an rt_rq can have a migratable current task, N
non-migratable queued tasks, and be flagged as overloaded / have its CPU
set in the rto_mask, despite having an empty pushable_tasks list.
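To make that failure mode concrete, here is a minimal standalone sketch of the
old predicate (the demo struct and counts are illustrative stand-ins, not the
kernel's types; the condition mirrors the removed update_rt_migration()):

  /* Illustrative stand-in for the old accounting; field names mirror
   * struct rt_rq, but this is self-contained demo code. */
  #include <stdbool.h>
  #include <stdio.h>

  struct rt_rq_demo {
  	unsigned int rt_nr_migratory;	/* migratable tasks, current included */
  	unsigned int rt_nr_total;	/* all RT tasks, current included */
  	unsigned int nr_pushable;	/* size of the pushable_tasks list */
  };

  /* The condition update_rt_migration() used to set the overload state */
  static bool old_overload(const struct rt_rq_demo *rt_rq)
  {
  	return rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1;
  }

  int main(void)
  {
  	/* Migratable current task plus three non-migratable queued tasks:
  	 * the current task is never on the pushable list and the queued
  	 * tasks are pinned, so nothing can be pushed -- yet the old
  	 * predicate reports overload and the CPU lands in rto_mask. */
  	struct rt_rq_demo rq = { .rt_nr_migratory = 1, .rt_nr_total = 4,
  				 .nr_pushable = 0 };

  	printf("overloaded=%d, pushable=%u\n", old_overload(&rq), rq.nr_pushable);
  	return 0;
  }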
Make an rt_rq's overload logic be driven by {enqueue,dequeue}_pushable_task().
Since rt_rq->{rt_nr_migratory,rt_nr_total} become unused, remove them.
Note that the case where the current task is pushed away to make way for a
migration-disabled task remains unchanged: the migration-disabled task has
to be in the pushable_tasks list in the first place, which means it has
nr_cpus_allowed > 1.
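For reference, the rule after this change reduces to the following shape (a
simplified standalone sketch of the hunks below; set_rto_mask_bit() and
clear_rto_mask_bit() are hypothetical stand-ins for rt_set_overload() and
rt_clear_overload(), and the counter stands in for the plist):

  /* Simplified sketch: the overload state now tracks the pushable_tasks
   * list directly. Self-contained demo code, not the kernel implementation. */
  #include <stdbool.h>
  #include <stdio.h>

  struct rq_demo {
  	unsigned int nr_pushable;	/* stands in for the plist length */
  	bool overloaded;		/* stands in for rt_rq->overloaded */
  };

  static void set_rto_mask_bit(struct rq_demo *rq)   { (void)rq; /* rt_set_overload() */ }
  static void clear_rto_mask_bit(struct rq_demo *rq) { (void)rq; /* rt_clear_overload() */ }

  static void enqueue_pushable(struct rq_demo *rq)
  {
  	rq->nr_pushable++;
  	if (!rq->overloaded) {		/* first pushable task: flag the CPU */
  		set_rto_mask_bit(rq);
  		rq->overloaded = true;
  	}
  }

  static void dequeue_pushable(struct rq_demo *rq)
  {
  	rq->nr_pushable--;
  	if (!rq->nr_pushable && rq->overloaded) {	/* list drained */
  		clear_rto_mask_bit(rq);
  		rq->overloaded = false;
  	}
  }

  int main(void)
  {
  	struct rq_demo rq = { 0 };

  	enqueue_pushable(&rq);	/* overloaded becomes true */
  	dequeue_pushable(&rq);	/* list empty again: overloaded cleared */
  	printf("overloaded=%d\n", rq.overloaded);
  	return 0;
  }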
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20230811112044.3302588-1-vschneid@redhat.com
			
			
commit 612f769edd
parent 3eafe22599

3 changed files with 10 additions and 65 deletions
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -724,9 +724,6 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
 
 	PU(rt_nr_running);
-#ifdef CONFIG_SMP
-	PU(rt_nr_migratory);
-#endif
 	P(rt_throttled);
 	PN(rt_time);
 	PN(rt_runtime);
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -143,7 +143,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
 #if defined CONFIG_SMP
 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
 	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
-	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
 #endif /* CONFIG_SMP */
@@ -358,53 +357,6 @@ static inline void rt_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
-static void update_rt_migration(struct rt_rq *rt_rq)
-{
-	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
-		if (!rt_rq->overloaded) {
-			rt_set_overload(rq_of_rt_rq(rt_rq));
-			rt_rq->overloaded = 1;
-		}
-	} else if (rt_rq->overloaded) {
-		rt_clear_overload(rq_of_rt_rq(rt_rq));
-		rt_rq->overloaded = 0;
-	}
-}
-
-static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-	struct task_struct *p;
-
-	if (!rt_entity_is_task(rt_se))
-		return;
-
-	p = rt_task_of(rt_se);
-	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
-
-	rt_rq->rt_nr_total++;
-	if (p->nr_cpus_allowed > 1)
-		rt_rq->rt_nr_migratory++;
-
-	update_rt_migration(rt_rq);
-}
-
-static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-	struct task_struct *p;
-
-	if (!rt_entity_is_task(rt_se))
-		return;
-
-	p = rt_task_of(rt_se);
-	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
-
-	rt_rq->rt_nr_total--;
-	if (p->nr_cpus_allowed > 1)
-		rt_rq->rt_nr_migratory--;
-
-	update_rt_migration(rt_rq);
-}
-
 static inline int has_pushable_tasks(struct rq *rq)
 {
 	return !plist_head_empty(&rq->rt.pushable_tasks);
@@ -438,6 +390,11 @@ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 	/* Update the highest prio pushable task */
 	if (p->prio < rq->rt.highest_prio.next)
 		rq->rt.highest_prio.next = p->prio;
+
+	if (!rq->rt.overloaded) {
+		rt_set_overload(rq);
+		rq->rt.overloaded = 1;
+	}
 }
 
 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -451,6 +408,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 		rq->rt.highest_prio.next = p->prio;
 	} else {
 		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
+
+		if (rq->rt.overloaded) {
+			rt_clear_overload(rq);
+			rq->rt.overloaded = 0;
+		}
 	}
 }
 
@@ -464,16 +426,6 @@ static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 }
 
-static inline
-void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-}
-
-static inline
-void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-}
-
 static inline void rt_queue_push_tasks(struct rq *rq)
 {
 }
@@ -1281,7 +1233,6 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
 
 	inc_rt_prio(rt_rq, prio);
-	inc_rt_migration(rt_se, rt_rq);
 	inc_rt_group(rt_se, rt_rq);
 }
 
@@ -1294,7 +1245,6 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
 
 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
-	dec_rt_migration(rt_se, rt_rq);
 	dec_rt_group(rt_se, rt_rq);
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -663,8 +663,6 @@ struct rt_rq {
 	} highest_prio;
 #endif
 #ifdef CONFIG_SMP
-	unsigned int		rt_nr_migratory;
-	unsigned int		rt_nr_total;
 	int			overloaded;
 	struct plist_head	pushable_tasks;
 