mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 02:30:34 +02:00
	sched: Make the idle task quack like a per-CPU kthread
For all intents and purposes, the idle task is a per-CPU kthread. It isn't created via the same route as other pcpu kthreads however, and as a result it is missing a few bells and whistles: it fails kthread_is_per_cpu() and it doesn't have PF_NO_SETAFFINITY set.

Fix the former by giving the idle task a kthread struct along with the KTHREAD_IS_PER_CPU flag. This requires some extra iffery as init_idle() can be called more than once on the same idle task.

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210510151024.2448573-2-valentin.schneider@arm.com
This commit is contained in:

parent fcb5017045
commit 00b89fe019

3 changed files with 35 additions and 18 deletions
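For reference, the check the idle task used to fail: kthread_is_per_cpu() looks up the task's struct kthread and tests the KTHREAD_IS_PER_CPU bit, so a task without a kthread struct can never pass it. A rough sketch, approximated from kernel/kthread.c of this era (not part of the diff below):

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	/* No kthread struct -- true for the idle task before this patch. */
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}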
				
			
include/linux/kthread.h
@@ -33,6 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 					  unsigned int cpu,
 					  const char *namefmt);
 
+void set_kthread_struct(struct task_struct *p);
+
 void kthread_set_per_cpu(struct task_struct *k, int cpu);
 bool kthread_is_per_cpu(struct task_struct *k);
 
kernel/kthread.c
@@ -68,16 +68,6 @@ enum KTHREAD_BITS {
 	KTHREAD_SHOULD_PARK,
 };
 
-static inline void set_kthread_struct(void *kthread)
-{
-	/*
-	 * We abuse ->set_child_tid to avoid the new member and because it
-	 * can't be wrongly copied by copy_process(). We also rely on fact
-	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
-	 */
-	current->set_child_tid = (__force void __user *)kthread;
-}
-
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
 	WARN_ON(!(k->flags & PF_KTHREAD));
@@ -103,6 +93,22 @@ static inline struct kthread *__to_kthread(struct task_struct *p)
 	return kthread;
 }
 
+void set_kthread_struct(struct task_struct *p)
+{
+	struct kthread *kthread;
+
+	if (__to_kthread(p))
+		return;
+
+	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
+	/*
+	 * We abuse ->set_child_tid to avoid the new member and because it
+	 * can't be wrongly copied by copy_process(). We also rely on fact
+	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
+	 */
+	p->set_child_tid = (__force void __user *)kthread;
+}
+
 void free_kthread_struct(struct task_struct *k)
 {
 	struct kthread *kthread;
@@ -272,8 +278,8 @@ static int kthread(void *_create)
 	struct kthread *self;
 	int ret;
 
-	self = kzalloc(sizeof(*self), GFP_KERNEL);
-	set_kthread_struct(self);
+	set_kthread_struct(current);
+	self = to_kthread(current);
 
 	/* If user was SIGKILLed, I release the structure. */
 	done = xchg(&create->done, NULL);
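For reference, the lookup helpers the new set_kthread_struct() pairs with, approximated from this era's kernel/kthread.c (not part of the commit). The NULL return of __to_kthread() is what provides the "extra iffery": a second init_idle() call on the same task finds the struct already in place and bails out instead of leaking another allocation.

/* The kthread struct is stashed in ->set_child_tid, a field kthreads
 * never use for its normal purpose. */
static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

/* NULL-safe variant: returns NULL when the task has no kthread struct,
 * which is what lets set_kthread_struct() return early on repeat calls. */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = (__force void *)p->set_child_tid;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}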
kernel/sched/core.c
@@ -8234,12 +8234,25 @@ void __init init_idle(struct task_struct *idle, int cpu)
 
 	__sched_fork(0, idle);
 
+	/*
+	 * The idle task doesn't need the kthread struct to function, but it
+	 * is dressed up as a per-CPU kthread and thus needs to play the part
+	 * if we want to avoid special-casing it in code that deals with per-CPU
+	 * kthreads.
+	 */
+	set_kthread_struct(idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_rq_lock(rq);
 
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
-	idle->flags |= PF_IDLE;
+	/*
+	 * PF_KTHREAD should already be set at this point; regardless, make it
+	 * look like a proper per-CPU kthread.
+	 */
+	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
+	kthread_set_per_cpu(idle, cpu);
 
 	scs_task_reset(idle);
 	kasan_unpoison_task_stack(idle);
@@ -8456,12 +8469,8 @@ static void balance_push(struct rq *rq)
 	/*
 	 * Both the cpu-hotplug and stop task are in this case and are
 	 * required to complete the hotplug process.
-	 *
-	 * XXX: the idle task does not match kthread_is_per_cpu() due to
-	 * histerical raisins.
 	 */
-	if (rq->idle == push_task ||
-	    kthread_is_per_cpu(push_task) ||
+	if (kthread_is_per_cpu(push_task) ||
 	    is_migration_disabled(push_task)) {
 
 		/*
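The init_idle() hunk above leans on kthread_set_per_cpu(), a helper that predates this patch, to set the bit that kthread_is_per_cpu() tests. Roughly, from kernel/kthread.c of this era (reproduced as an approximation):

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);

	if (!kthread)
		return;

	/* Per-CPU kthreads are pinned, hence PF_NO_SETAFFINITY first. */
	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

With the kthread struct allocated and this bit set, the idle task now satisfies kthread_is_per_cpu(), which is what lets balance_push() above drop its explicit rq->idle check.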