sched: Add wrapper for get_wchan() to keep task blocked

Having a stable wchan means the process must be blocked, and must stay
blocked while the stack unwinding is performed.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk> [arm]
Tested-by: Mark Rutland <mark.rutland@arm.com> [arm64]
Link: https://lkml.kernel.org/r/20211008111626.332092234@infradead.org
This commit is contained in:

parent bc9bbb8173
commit 42a20f86dc

50 changed files with 80 additions and 112 deletions
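Every hunk below follows the same pattern: the architecture's routine is renamed from get_wchan() to __get_wchan() and its open-coded "is this task blocked?" check is dropped, while a single generic get_wchan() wrapper in the scheduler core performs that check once, under p->pi_lock, so the task cannot be woken while the arch unwinder walks its stack. For orientation, here is the new core wrapper, copied from the kernel/sched/core.c hunk at the end of this diff; the extra comments are editorial, not part of the commit:

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip = 0;
	unsigned int state;

	if (!p || p == current)
		return 0;

	/* Only get wchan if task is blocked and we can keep it that way. */
	raw_spin_lock_irq(&p->pi_lock);	/* try_to_wake_up() also takes pi_lock */
	state = READ_ONCE(p->__state);
	smp_rmb(); /* see try_to_wake_up() */
	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
		ip = __get_wchan(p);	/* arch-specific stack walk */
	raw_spin_unlock_irq(&p->pi_lock);

	return ip;
}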

arch/alpha/include/asm/processor.h
@@ -42,7 +42,7 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 struct task_struct;
 extern void release_thread(struct task_struct *);
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
 

arch/alpha/kernel/process.c
@@ -376,12 +376,11 @@ thread_saved_pc(struct task_struct *t)
 }
 
 unsigned long
-get_wchan(struct task_struct *p)
+__get_wchan(struct task_struct *p)
 {
 	unsigned long schedule_frame;
 	unsigned long pc;
-	if (!p || p == current || task_is_running(p))
-		return 0;
+
 	/*
 	 * This one depends on the frame size of schedule().  Do a
 	 * "disass schedule" in gdb to find the frame size.  Also, the

arch/arc/include/asm/processor.h
@@ -70,7 +70,7 @@ struct task_struct;
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
 			 unsigned long usp);
 
-extern unsigned int get_wchan(struct task_struct *p);
+extern unsigned int __get_wchan(struct task_struct *p);
 
 #endif /* !__ASSEMBLY__ */
 

arch/arc/kernel/stacktrace.c
@@ -15,7 +15,7 @@
  *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
  *
  *  vineetg: March 2009
- *  -Implemented correct versions of thread_saved_pc() and get_wchan()
+ *  -Implemented correct versions of thread_saved_pc() and __get_wchan()
  *
  *  rajeshwarr: 2008
  *  -Initial implementation
@@ -248,7 +248,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
  * Of course just returning schedule( ) would be pointless so unwind until
  * the function is not in schedular code
  */
-unsigned int get_wchan(struct task_struct *tsk)
+unsigned int __get_wchan(struct task_struct *tsk)
 {
 	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
 }

arch/arm/include/asm/processor.h
@@ -84,7 +84,7 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

arch/arm/kernel/process.c
@@ -276,13 +276,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	return 0;
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || task_is_running(p))
-		return 0;
 
 	frame.fp = thread_saved_fp(p);
 	frame.sp = thread_saved_sp(p);

arch/arm64/include/asm/processor.h
@@ -257,7 +257,7 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 void update_sctlr_el1(u64 sctlr);
 

arch/arm64/kernel/process.c
@@ -528,13 +528,11 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	return last;
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
 	unsigned long stack_page, ret = 0;
 	int count = 0;
-	if (!p || p == current || task_is_running(p))
-		return 0;
 
 	stack_page = (unsigned long)try_get_task_stack(p);
 	if (!stack_page)

arch/csky/include/asm/processor.h
@@ -81,7 +81,7 @@ static inline void release_thread(struct task_struct *dead_task)
 
 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)

arch/csky/kernel/stacktrace.c
@@ -111,12 +111,11 @@ static bool save_wchan(unsigned long pc, void *arg)
 	return false;
 }
 
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 
-	if (likely(task && task != current && !task_is_running(task)))
-		walk_stackframe(task, NULL, save_wchan, &pc);
+	walk_stackframe(task, NULL, save_wchan, &pc);
 	return pc;
 }
 

arch/h8300/include/asm/processor.h
@@ -105,7 +105,7 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
 	({			 \

arch/h8300/kernel/process.c
@@ -128,15 +128,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	stack_page = (unsigned long)p;
 	fp = ((struct pt_regs *)p->thread.ksp)->er6;
 	do {

arch/hexagon/include/asm/processor.h
@@ -64,7 +64,7 @@ struct thread_struct {
 extern void release_thread(struct task_struct *dead_task);
 
 /* Get wait channel for task P.  */
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 /*  The following stuff is pretty HEXAGON specific.  */
 

arch/hexagon/kernel/process.c
@@ -130,13 +130,11 @@ void flush_thread(void)
  * is an identification of the point at which the scheduler
  * was invoked by a blocked thread.
  */
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || task_is_running(p))
-		return 0;
 
 	stack_page = (unsigned long)task_stack_page(p);
 	fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;

arch/ia64/include/asm/processor.h
@@ -330,7 +330,7 @@ struct task_struct;
 #define release_thread(dead_task)
 
 /* Get wait channel for task P.  */
-extern unsigned long get_wchan (struct task_struct *p);
+extern unsigned long __get_wchan (struct task_struct *p);
 
 /* Return instruction pointer of blocked task TSK.  */
 #define KSTK_EIP(tsk)					\

arch/ia64/kernel/process.c
@@ -523,15 +523,12 @@ exit_thread (struct task_struct *tsk)
 }
 
 unsigned long
-get_wchan (struct task_struct *p)
+__get_wchan (struct task_struct *p)
 {
 	struct unw_frame_info info;
 	unsigned long ip;
 	int count = 0;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	/*
 	 * Note: p may not be a blocked task (it could be current or
 	 * another process running on some other CPU.  Rather than

arch/m68k/include/asm/processor.h
@@ -150,7 +150,7 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
     ({			\

arch/m68k/kernel/process.c
@@ -263,13 +263,11 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
 }
 EXPORT_SYMBOL(dump_fpu);
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || task_is_running(p))
-		return 0;
 
 	stack_page = (unsigned long)task_stack_page(p);
 	fp = ((struct switch_stack *)p->thread.ksp)->a6;

arch/microblaze/include/asm/processor.h
@@ -68,7 +68,7 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 /* The size allocated for kernel stacks. This _must_ be a power of two! */
 # define KERNEL_STACK_SIZE	0x2000

arch/microblaze/kernel/process.c
@@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	return 0;
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 /* TBD (used by procfs) */
 	return 0;

arch/mips/include/asm/processor.h
@@ -369,7 +369,7 @@ static inline void flush_thread(void)
 {
 }
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
 			 THREAD_SIZE - 32 - sizeof(struct pt_regs))

arch/mips/kernel/process.c
@@ -511,7 +511,7 @@ static int __init frame_info_init(void)
 
 	/*
 	 * Without schedule() frame info, result given by
-	 * thread_saved_pc() and get_wchan() are not reliable.
+	 * thread_saved_pc() and __get_wchan() are not reliable.
 	 */
 	if (schedule_mfi.pc_offset < 0)
 		printk("Can't analyze schedule() prologue at %p\n", schedule);
@@ -652,9 +652,9 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 #endif
 
 /*
- * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 #ifdef CONFIG_KALLSYMS
@@ -662,8 +662,6 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ra = 0;
 #endif
 
-	if (!task || task == current || task_is_running(task))
-		goto out;
 	if (!task_stack_page(task))
 		goto out;
 

arch/nds32/include/asm/processor.h
@@ -83,7 +83,7 @@ extern struct task_struct *last_task_used_math;
 /* Prepare to copy thread state - unlazy all lazy status */
 #define prepare_to_copy(tsk)	do { } while (0)
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define cpu_relax()			barrier()
 
|  |  | |||
|  | @ -233,15 +233,12 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) | |||
| 
 | ||||
| EXPORT_SYMBOL(dump_fpu); | ||||
| 
 | ||||
| unsigned long get_wchan(struct task_struct *p) | ||||
| unsigned long __get_wchan(struct task_struct *p) | ||||
| { | ||||
| 	unsigned long fp, lr; | ||||
| 	unsigned long stack_start, stack_end; | ||||
| 	int count = 0; | ||||
| 
 | ||||
| 	if (!p || p == current || task_is_running(p)) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	if (IS_ENABLED(CONFIG_FRAME_POINTER)) { | ||||
| 		stack_start = (unsigned long)end_of_stack(p); | ||||
| 		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE; | ||||
|  | @ -258,5 +255,3 @@ unsigned long get_wchan(struct task_struct *p) | |||
| 	} | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(get_wchan); | ||||
|  |  | |||

arch/nios2/include/asm/processor.h
@@ -69,7 +69,7 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

arch/nios2/kernel/process.c
@@ -217,15 +217,12 @@ void dump(struct pt_regs *fp)
 	pr_emerg("\n\n");
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	stack_page = (unsigned long)p;
 	fp = ((struct switch_stack *)p->thread.ksp)->fp;	/* ;dgt2 */
 	do {

arch/openrisc/include/asm/processor.h
@@ -73,7 +73,7 @@ struct thread_struct {
 
 void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define cpu_relax()     barrier()
 
|  |  | |||
|  | @ -263,7 +263,7 @@ void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs) | |||
| 	dest[35] = 0; | ||||
| } | ||||
| 
 | ||||
| unsigned long get_wchan(struct task_struct *p) | ||||
| unsigned long __get_wchan(struct task_struct *p) | ||||
| { | ||||
| 	/* TODO */ | ||||
| 
 | ||||
|  |  | |||

arch/parisc/include/asm/processor.h
@@ -273,7 +273,7 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)	((tsk)->thread.regs.iaoq[0])
 #define KSTK_ESP(tsk)	((tsk)->thread.regs.gr[30])

arch/parisc/kernel/process.c
@@ -240,15 +240,12 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 }
 
 unsigned long
-get_wchan(struct task_struct *p)
+__get_wchan(struct task_struct *p)
 {
 	struct unwind_frame_info info;
 	unsigned long ip;
 	int count = 0;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	/*
 	 * These bracket the sleeping functions..
 	 */

arch/powerpc/include/asm/processor.h
@@ -300,7 +300,7 @@ struct thread_struct {
 
 #define task_pt_regs(tsk)	((tsk)->thread.regs)
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
 #define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

arch/powerpc/kernel/process.c
@@ -2111,14 +2111,11 @@ int validate_sp(unsigned long sp, struct task_struct *p,
 
 EXPORT_SYMBOL(validate_sp);
 
-static unsigned long __get_wchan(struct task_struct *p)
+static unsigned long ___get_wchan(struct task_struct *p)
 {
 	unsigned long ip, sp;
 	int count = 0;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	sp = p->thread.ksp;
 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
 		return 0;
@@ -2137,14 +2134,14 @@ static unsigned long __get_wchan(struct task_struct *p)
 	return 0;
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long ret;
 
 	if (!try_get_task_stack(p))
 		return 0;
 
-	ret = __get_wchan(p);
+	ret = ___get_wchan(p);
 
 	put_task_stack(p);
 

arch/riscv/include/asm/processor.h
@@ -66,7 +66,7 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 
 static inline void wait_for_interrupt(void)

arch/riscv/kernel/stacktrace.c
@@ -128,16 +128,14 @@ static bool save_wchan(void *arg, unsigned long pc)
 	return true;
 }
 
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 
-	if (likely(task && task != current && !task_is_running(task))) {
-		if (!try_get_task_stack(task))
-			return 0;
-		walk_stackframe(task, NULL, save_wchan, &pc);
-		put_task_stack(task);
-	}
+	if (!try_get_task_stack(task))
+		return 0;
+	walk_stackframe(task, NULL, save_wchan, &pc);
+	put_task_stack(task);
 	return pc;
 }
 

arch/s390/include/asm/processor.h
@@ -192,7 +192,7 @@ static inline void release_thread(struct task_struct *tsk) { }
 void guarded_storage_release(struct task_struct *tsk);
 void gs_load_bc_cb(struct pt_regs *regs);
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
 #define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)

arch/s390/kernel/process.c
@@ -181,12 +181,12 @@ void execve_tail(void)
 	asm volatile("sfpc %0" : : "d" (0));
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	struct unwind_state state;
 	unsigned long ip = 0;
 
-	if (!p || p == current || task_is_running(p) || !task_stack_page(p))
+	if (!task_stack_page(p))
 		return 0;
 
 	if (!try_get_task_stack(p))

arch/sh/include/asm/processor_32.h
@@ -180,7 +180,7 @@ static inline void show_code(struct pt_regs *regs)
 }
 #endif
 
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])

arch/sh/kernel/process_32.c
@@ -182,13 +182,10 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	return prev;
 }
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long pc;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	/*
 	 * The same comment as on the Alpha applies here, too ...
 	 */

arch/sparc/include/asm/processor_32.h
@@ -89,7 +89,7 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 /* Free all resources held by a thread. */
 #define release_thread(tsk)		do { } while(0)
 
-unsigned long get_wchan(struct task_struct *);
+unsigned long __get_wchan(struct task_struct *);
 
 #define task_pt_regs(tsk) ((tsk)->thread.kregs)
 #define KSTK_EIP(tsk)  ((tsk)->thread.kregs->pc)

arch/sparc/include/asm/processor_64.h
@@ -183,7 +183,7 @@ do { \
 /* Free all resources held by a thread. */
 #define release_thread(tsk)		do { } while (0)
 
-unsigned long get_wchan(struct task_struct *task);
+unsigned long __get_wchan(struct task_struct *task);
 
 #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)

arch/sparc/kernel/process_32.c
@@ -365,7 +365,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	return 0;
 }
 
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
 {
 	unsigned long pc, fp, bias = 0;
 	unsigned long task_base = (unsigned long) task;
@@ -373,9 +373,6 @@ unsigned long get_wchan(struct task_struct *task)
 	struct reg_window32 *rw;
 	int count = 0;
 
-	if (!task || task == current || task_is_running(task))
-		goto out;
-
 	fp = task_thread_info(task)->ksp + bias;
 	do {
 		/* Bogus frame pointer? */

arch/sparc/kernel/process_64.c
@@ -663,7 +663,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return 0;
 }
 
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
 {
 	unsigned long pc, fp, bias = 0;
 	struct thread_info *tp;
@@ -671,9 +671,6 @@ unsigned long get_wchan(struct task_struct *task)
         unsigned long ret = 0;
 	int count = 0;
 
-	if (!task || task == current || task_is_running(task))
-		goto out;
-
 	tp = task_thread_info(task);
 	bias = STACK_BIAS;
 	fp = task_thread_info(task)->ksp + bias;

arch/um/include/asm/processor-generic.h
@@ -106,6 +106,6 @@ extern struct cpuinfo_um boot_cpu_data;
 #define cache_line_size()	(boot_cpu_data.cache_alignment)
 
 #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 #endif

arch/um/kernel/process.c
@@ -364,14 +364,11 @@ unsigned long arch_align_stack(unsigned long sp)
 }
 #endif
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long stack_page, sp, ip;
 	bool seen_sched = 0;
 
-	if ((p == NULL) || (p == current) || task_is_running(p))
-		return 0;
-
 	stack_page = (unsigned long) task_stack_page(p);
 	/* Bail if the process has no kernel stack for some reason */
 	if (stack_page == 0)

arch/x86/include/asm/processor.h
@@ -589,7 +589,7 @@ static inline void load_sp0(unsigned long sp0)
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 /*
  * Generic CPUID function

arch/x86/kernel/process.c
@@ -942,13 +942,10 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  * because the task might wake up and we might look at a stack
  * changing under us.
  */
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long entry = 0;
 
-	if (p == current || task_is_running(p))
-		return 0;
-
 	stack_trace_save_tsk(p, &entry, 1, 0);
 	return entry;
 }

arch/xtensa/include/asm/processor.h
@@ -215,7 +215,7 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 #define release_thread(thread) do { } while(0)
 
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])

arch/xtensa/kernel/process.c
@@ -298,15 +298,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 * These bracket the sleeping functions..
 */
 
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
 {
 	unsigned long sp, pc;
 	unsigned long stack_page = (unsigned long) task_stack_page(p);
 	int count = 0;
 
-	if (!p || p == current || task_is_running(p))
-		return 0;
-
 	sp = p->thread.sp;
 	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
 

include/linux/sched.h
@@ -2139,6 +2139,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif /* CONFIG_SMP */
 
 extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long get_wchan(struct task_struct *p);
 
 /*
  * In order to reduce various lock holder preemption latencies provide an

kernel/sched/core.c
@@ -1966,6 +1966,25 @@ bool sched_task_on_rq(struct task_struct *p)
 	return task_on_rq_queued(p);
 }
 
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long ip = 0;
+	unsigned int state;
+
+	if (!p || p == current)
+		return 0;
+
+	/* Only get wchan if task is blocked and we can keep it that way. */
+	raw_spin_lock_irq(&p->pi_lock);
+	state = READ_ONCE(p->__state);
+	smp_rmb(); /* see try_to_wake_up() */
+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+		ip = __get_wchan(p);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	return ip;
+}
+
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (!(flags & ENQUEUE_NOCLOCK))
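
For callers nothing changes: get_wchan() still returns 0 for a running (or unstably blocked) task and a kernel text address for a blocked one. A minimal sketch of the typical consumer, modeled on the procfs wchan handler; the function name and wiring here are illustrative, not part of this commit:

#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

/* Illustrative only: resolve a blocked task's wait channel to a symbol
 * name, the way fs/proc presents /proc/<pid>/wchan. */
static int print_wchan(struct seq_file *m, struct task_struct *task)
{
	unsigned long wchan = get_wchan(task);	/* 0 unless stably blocked */
	char symname[KSYM_NAME_LEN];

	if (wchan && !lookup_symbol_name(wchan, symname))
		seq_printf(m, "%s", symname);
	else
		seq_putc(m, '0');
	return 0;
}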