mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	sched/deadline: Optimize pull_dl_task()
pull_dl_task() uses pick_next_earliest_dl_task() to select a migration candidate; this is sub-optimal since the next earliest task -- as per the regular runqueue -- might not be migratable at all. This could result in iterating the entire runqueue looking for a task. Instead iterate the pushable queue -- this queue only contains tasks that have at least 2 cpus set in their cpus_allowed mask. Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com> [ Improved the changelog. ] Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Borislav Petkov <bp@alien8.de> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Juri Lelli <juri.lelli@arm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1431496867-4194-1-git-send-email-wanpeng.li@linux.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
		
							parent
							
								
									1cde2930e1
								
							
						
					
					
						commit
						8b5e770ed7
					
				
					 1 changed file with 27 additions and 1 deletion
				
			
		| 
						 | 
					@ -1230,6 +1230,32 @@ static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
 | 
				
			||||||
	return NULL;
 | 
						return NULL;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					/*
 | 
				
			||||||
 | 
					 * Return the earliest pushable rq's task, which is suitable to be executed
 | 
				
			||||||
 | 
					 * on the CPU, NULL otherwise:
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
 | 
				
			||||||
 | 
						struct task_struct *p = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (!has_pushable_dl_tasks(rq))
 | 
				
			||||||
 | 
							return NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					next_node:
 | 
				
			||||||
 | 
						if (next_node) {
 | 
				
			||||||
 | 
							p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							if (pick_dl_task(rq, p, cpu))
 | 
				
			||||||
 | 
								return p;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							next_node = rb_next(next_node);
 | 
				
			||||||
 | 
							goto next_node;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return NULL;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 | 
					static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int find_later_rq(struct task_struct *task)
 | 
					static int find_later_rq(struct task_struct *task)
 | 
				
			||||||
| 
						 | 
					@ -1514,7 +1540,7 @@ static int pull_dl_task(struct rq *this_rq)
 | 
				
			||||||
		if (src_rq->dl.dl_nr_running <= 1)
 | 
							if (src_rq->dl.dl_nr_running <= 1)
 | 
				
			||||||
			goto skip;
 | 
								goto skip;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		p = pick_next_earliest_dl_task(src_rq, this_cpu);
 | 
							p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		/*
 | 
							/*
 | 
				
			||||||
		 * We found a task to be pulled if:
 | 
							 * We found a task to be pulled if:
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue