commit 3f020399e4
Merge tag 'sched-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "Core facilities:
   - Add the "Lazy preemption" model (CONFIG_PREEMPT_LAZY=y), which
     optimizes fair-class preemption by delaying preemption requests to
     the tick boundary, while working as full preemption for
     RR/FIFO/DEADLINE classes. (Peter Zijlstra)
        - x86: Enable Lazy preemption (Peter Zijlstra)
        - riscv: Enable Lazy preemption (Jisheng Zhang)
   - Initialize idle tasks only once (Thomas Gleixner)
   - sched/ext: Remove sched_fork() hack (Thomas Gleixner)
  Fair scheduler:
   - Optimize the PLACE_LAG when se->vlag is zero (Huang Shijie)
  Idle loop:
   - Optimize the generic idle loop by removing unnecessary memory
     barrier (Zhongqiu Han)
  RSEQ:
   - Improve cache locality of RSEQ concurrency IDs for intermittent
     workloads (Mathieu Desnoyers)
  Waitqueues:
   - Make wake_up_{bit,var} less fragile (Neil Brown)
  PSI:
   - Pass enqueue/dequeue flags to psi callbacks directly (Johannes
     Weiner)
  Preparatory patches for proxy execution:
   - Add move_queued_task_locked helper (Connor O'Brien)
   - Consolidate pick_*_task to task_is_pushable helper (Connor O'Brien)
   - Split out __schedule() deactivate task logic into a helper (John
     Stultz)
   - Split scheduler and execution contexts (Peter Zijlstra)
   - Make mutex::wait_lock irq safe (Juri Lelli)
   - Expose __mutex_owner() (Juri Lelli)
   - Remove wakeups from under mutex::wait_lock (Peter Zijlstra)
  Misc fixes and cleanups:
   - Remove unused __HAVE_THREAD_FUNCTIONS hook support (David
     Disseldorp)
   - Update the comment for TIF_NEED_RESCHED_LAZY (Sebastian Andrzej
     Siewior)
   - Remove unused bit_wait_io_timeout (Dr. David Alan Gilbert)
   - remove the DOUBLE_TICK feature (Huang Shijie)
   - fix the comment for PREEMPT_SHORT (Huang Shijie)
   - Fix unused variable warning (Christian Loehle)
   - No PREEMPT_RT=y for all{yes,mod}config"
* tag 'sched-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  sched, x86: Update the comment for TIF_NEED_RESCHED_LAZY.
  sched: No PREEMPT_RT=y for all{yes,mod}config
  riscv: add PREEMPT_LAZY support
  sched, x86: Enable Lazy preemption
  sched: Enable PREEMPT_DYNAMIC for PREEMPT_RT
  sched: Add Lazy preemption model
  sched: Add TIF_NEED_RESCHED_LAZY infrastructure
  sched/ext: Remove sched_fork() hack
  sched: Initialize idle tasks only once
  sched: psi: pass enqueue/dequeue flags to psi callbacks directly
  sched/uclamp: Fix unnused variable warning
  sched: Split scheduler and execution contexts
  sched: Split out __schedule() deactivate task logic into a helper
  sched: Consolidate pick_*_task to task_is_pushable helper
  sched: Add move_queued_task_locked helper
  locking/mutex: Expose __mutex_owner()
  locking/mutex: Make mutex::wait_lock irq safe
  locking/mutex: Remove wakeups from under mutex::wait_lock
  sched: Improve cache locality of RSEQ concurrency IDs for intermittent workloads
  sched: idle: Optimize the generic idle loop by removing needless memory barrier
  ...
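
The "Lazy preemption" item summarized above can be illustrated with a small conceptual sketch. This is not the kernel's actual implementation: request_resched_sketch() and tick_escalate_lazy_sketch() are hypothetical helpers made up for this illustration, while TIF_NEED_RESCHED, TIF_NEED_RESCHED_LAZY, set_tsk_thread_flag() and test_tsk_thread_flag() are the real flags and accessors the series builds on. The idea, per the summary, is that fair-class preemption requests are deferred to the tick boundary, while RR/FIFO/DEADLINE requests preempt immediately.

#include <linux/sched.h>
#include <linux/thread_info.h>

/* Hypothetical helper: record a preemption request against @curr. */
static void request_resched_sketch(struct task_struct *curr, bool fair_class)
{
	if (fair_class)
		/* Fair class: defer; acted on at the next tick boundary. */
		set_tsk_thread_flag(curr, TIF_NEED_RESCHED_LAZY);
	else
		/* RR/FIFO/DEADLINE: behave like full preemption. */
		set_tsk_thread_flag(curr, TIF_NEED_RESCHED);
}

/* Hypothetical helper: at the tick, escalate a pending lazy request. */
static void tick_escalate_lazy_sketch(struct task_struct *curr)
{
	if (test_tsk_thread_flag(curr, TIF_NEED_RESCHED_LAZY))
		set_tsk_thread_flag(curr, TIF_NEED_RESCHED);
}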
include/linux/sched/task_stack.h (119 lines, 2.9 KiB, C):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_STACK_H
#define _LINUX_SCHED_TASK_STACK_H

/*
 * task->stack (kernel stack) handling interfaces:
 */

#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/refcount.h>
#include <linux/kasan.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead.  task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static __always_inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

#define setup_thread_stack(new,old)	do { } while(0)

static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
#else
	return task->stack;
#endif
}

#else

#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return refcount_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif

void exit_task_stack_account(struct task_struct *tsk);

#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(const void *obj)
{
	void *stack = task_stack_page(current);

	obj = kasan_reset_tag(obj);
	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
unsigned long stack_not_used(struct task_struct *p);
#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
	return 0;
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

#endif /* _LINUX_SCHED_TASK_STACK_H */
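
The comment above task_stack_page() notes that a non-current task's stack can be freed out from under the caller, so it should be pinned first. A minimal sketch of that pattern follows; dump_stack_bounds() is a hypothetical caller, not part of this header, while try_get_task_stack(), put_task_stack(), stack_not_used() and THREAD_SIZE are the interfaces declared above.

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/*
 * Hypothetical caller: pin a (possibly exiting) task's stack, report
 * its bounds and unused byte count, then drop the reference.
 */
static void dump_stack_bounds(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);

	if (!stack)
		return;		/* the task has already released its stack */

	pr_info("%s: stack [%px, %px), %lu bytes never used\n",
		tsk->comm, stack, stack + THREAD_SIZE, stack_not_used(tsk));

	put_task_stack(tsk);
}

Note that stack_not_used() only reports a meaningful value when CONFIG_DEBUG_STACK_USAGE is enabled; otherwise, as the header shows, it returns 0.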