mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 00:28:52 +02:00)

commit adb35e8dc9
Merge tag 'sched-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Thomas Gleixner:
 - migrate_disable/enable() support which originates from the RT tree
   and is now a prerequisite for the new preemptible kmap_local() API
   which aims to replace kmap_atomic().
 - A fair amount of topology and NUMA related improvements
 - Improvements for the frequency invariant calculations
 - Enhanced robustness for the global CPU priority tracking and decision
   making
 - The usual small fixes and enhancements all over the place
* tag 'sched-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (61 commits)
  sched/fair: Trivial correction of the newidle_balance() comment
  sched/fair: Clear SMT siblings after determining the core is not idle
  sched: Fix kernel-doc markup
  x86: Print ratio freq_max/freq_base used in frequency invariance calculations
  x86, sched: Use midpoint of max_boost and max_P for frequency invariance on AMD EPYC
  x86, sched: Calculate frequency invariance for AMD systems
  irq_work: Optimize irq_work_single()
  smp: Cleanup smp_call_function*()
  irq_work: Cleanup
  sched: Limit the amount of NUMA imbalance that can exist at fork time
  sched/numa: Allow a floating imbalance between NUMA nodes
  sched: Avoid unnecessary calculation of load imbalance at clone time
  sched/numa: Rename nr_running and break out the magic number
  sched: Make migrate_disable/enable() independent of RT
  sched/topology: Condition EAS enablement on FIE support
  arm64: Rebuild sched domains on invariance status changes
  sched/topology,schedutil: Wrap sched domains rebuild
  sched/uclamp: Allow to reset a task uclamp constraint value
  sched/core: Fix typos in comments
  Documentation: scheduler: fix information on arch SD flags, sched_domain and sched_debug
  ...
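The first bullet in the pull summary above refers to the highmem rework this series enables: kmap_atomic() creates a per-CPU mapping and therefore has to disable preemption for the lifetime of the mapping, whereas kmap_local() only needs the task to stay on the current CPU, which migrate_disable()/migrate_enable() now provide without turning preemption off. A minimal sketch of the usage difference, assuming the kmap_local_page()/kunmap_local() helpers that this work is a prerequisite for; the helper functions below are the editor's illustration, not code from this merge:

/*
 * Editor's sketch, not part of this tree: the same copy done with the
 * old kmap_atomic() API and with the newer preemptible kmap_local API.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page_old(struct page *page, void *dst, size_t len)
{
	char *src = kmap_atomic(page);	/* preemption disabled until unmap */

	memcpy(dst, src, len);
	kunmap_atomic(src);
}

static void copy_from_page_new(struct page *page, void *dst, size_t len)
{
	char *src = kmap_local_page(page);	/* preemptible, CPU-local mapping */

	memcpy(dst, src, len);
	kunmap_local(src);
}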
		
	
			
		
			
				
	
	
		
kernel/sched/idle.c · 511 lines · 12 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static noinline int __cpuidle cpu_idle_poll(void)
{
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	rcu_idle_enter();
	local_irq_enable();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

	rcu_idle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	raw_local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {

		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		/*
		 * arch_cpu_idle() is supposed to enable IRQs, however
		 * we can't do that because of RCU and tracing.
		 *
		 * Trace IRQs enable here, then switch off RCU, and have
		 * arch_cpu_idle() use raw_local_irq_enable(). Note that
		 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
		 * last -- this is very similar to the entry code.
		 */
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(_THIS_IP_);
		rcu_idle_enter();
		lockdep_hardirqs_on(_THIS_IP_);

		arch_cpu_idle();

		/*
		 * OK, so IRQs are enabled here, but RCU needs them disabled to
		 * turn itself back on.. funny thing is that disabling IRQs
		 * will cause tracing, which needs RCU. Jump through hoops to
		 * make it 'work'.
		 */
		raw_local_irq_disable();
		lockdep_hardirqs_off(_THIS_IP_);
		rcu_idle_exit();
		lockdep_hardirqs_on(_THIS_IP_);
		raw_local_irq_enable();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		      int next_state)
{
	/*
	 * The idle task must be scheduled, it is pointless to go to idle, just
	 * update no idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, is called with polling
 * set, and it returns with polling set.  If it ever stops polling, it
 * must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more rcu read side critical sections and one more
	 * step to the grace period
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available.  Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_from_idle();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task          = set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
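For context on play_idle_precise() above: its WARN_ON_ONCE() checks spell out the calling contract, namely a SCHED_FIFO kernel thread bound to a single CPU, with PF_NO_SETAFFINITY set and no user mm. Below is a hedged sketch of a caller honoring that contract, loosely modeled on the idle-injection pattern; the function and thread names are the editor's own illustration, not part of this file or of the powercap idle_inject driver.

/* Hypothetical idle-injection worker; names are illustrative only. */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/err.h>

static int example_inject_fn(void *data)
{
	/* play_idle_precise() warns unless the caller runs SCHED_FIFO. */
	sched_set_fifo(current);

	while (!kthread_should_stop()) {
		/* Force ~5ms of idle; U64_MAX places no latency limit on state choice. */
		play_idle_precise(5 * NSEC_PER_MSEC, U64_MAX);
		msleep(50);	/* let normal work run between injections */
	}
	return 0;
}

static struct task_struct *example_start_injection(int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(example_inject_fn, NULL, "idle_inject_ex/%d", cpu);
	if (IS_ERR(tsk))
		return tsk;

	/* kthread_bind() pins the thread to one CPU and sets PF_NO_SETAFFINITY. */
	kthread_bind(tsk, cpu);
	wake_up_process(tsk);
	return tsk;
}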