fork: Provide usercopy whitelisting for task_struct
While the blocked and saved_sigmask fields of task_struct are copied to
userspace (via sigmask_to_save() and setup_rt_frame()), they are always
copied with a static length (i.e. sizeof(sigset_t)).
The only portion of task_struct that is potentially dynamically sized and
may be copied to userspace is in the architecture-specific thread_struct
at the end of task_struct.
cache object allocation:
    kernel/fork.c:
        alloc_task_struct_node(...):
            return kmem_cache_alloc_node(task_struct_cachep, ...);
        dup_task_struct(...):
            ...
            tsk = alloc_task_struct_node(node);
        copy_process(...):
            ...
            dup_task_struct(...)
        _do_fork(...):
            ...
            copy_process(...)
example usage trace:
    arch/x86/kernel/fpu/signal.c:
        __fpu__restore_sig(...):
            ...
            struct task_struct *tsk = current;
            struct fpu *fpu = &tsk->thread.fpu;
            ...
            __copy_from_user(&fpu->state.xsave, ..., state_size);
        fpu__restore_sig(...):
            ...
            return __fpu__restore_sig(...);
    arch/x86/kernel/signal.c:
        restore_sigcontext(...):
            ...
            fpu__restore_sig(...)
This introduces arch_thread_struct_whitelist() to let an architecture
declare specifically where the whitelist should be within thread_struct.
If undefined, the entire thread_struct field is left whitelisted.
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: "Mickaël Salaün" <mic@digikod.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Rik van Riel <riel@redhat.com>
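
As a sketch of the arch-side counterpart (not part of this patch): an
architecture whose user-visible register state lives in thread.fpu.state, as
in the x86 trace above, would select HAVE_ARCH_THREAD_STRUCT_WHITELIST in its
Kconfig and define something along these lines in its headers, where
fpu_kernel_xstate_size is assumed here to track the dynamically sized xstate
length:

    /*
     * Hypothetical arch definition (sketch only): whitelist just the FPU
     * register state inside thread_struct for hardened usercopy. Assumes
     * the arch's thread_struct and xstate size variable are in scope.
     */
    #define arch_thread_struct_whitelist(offset, size)                   \
        do {                                                             \
            *(offset) = offsetof(struct thread_struct, fpu.state);      \
            *(size) = fpu_kernel_xstate_size;                           \
        } while (0)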
			
			
parent f9d29946c5
commit 5905429ad8

3 changed files with 45 additions and 2 deletions:
    arch/Kconfig (11 additions)
    include/linux/sched/task.h
    kernel/fork.c
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -242,6 +242,17 @@ config ARCH_INIT_TASK
 config ARCH_TASK_STRUCT_ALLOCATOR
 	bool
 
+config HAVE_ARCH_THREAD_STRUCT_WHITELIST
+	bool
+	depends on !ARCH_TASK_STRUCT_ALLOCATOR
+	help
+	  An architecture should select this to provide hardened usercopy
+	  knowledge about what region of the thread_struct should be
+	  whitelisted for copying to userspace. Normally this is only the
+	  FPU registers. Specifically, arch_thread_struct_whitelist()
+	  should be implemented. Without this, the entire thread_struct
+	  field in task_struct will be left whitelisted.
+
 # Select if arch has its private alloc_thread_stack() function
 config ARCH_THREAD_STACK_ALLOCATOR
 	bool
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly;
 # define arch_task_struct_size (sizeof(struct task_struct))
 #endif
 
+#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
+/*
+ * If an architecture has not declared a thread_struct whitelist we
+ * must assume something there may need to be copied to userspace.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = 0;
+	/* Handle dynamically sized thread_struct. */
+	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
+}
+#endif
+
 #ifdef CONFIG_VMAP_STACK
 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 {
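
To see what the generic fallback above amounts to, a sketch (illustrative
only; show_default_whitelist() is not a kernel function) of the region it
reports when no architecture override exists:

    #include <linux/printk.h>
    #include <linux/sched/task.h>

    /* Illustrative only: dump what the generic fallback whitelists. */
    static void show_default_whitelist(void)
    {
        unsigned long offset, size;

        arch_thread_struct_whitelist(&offset, &size);

        /*
         * offset is 0 (relative to thread_struct) and size runs to the end
         * of the possibly dynamically sized task_struct, i.e.
         * arch_task_struct_size - offsetof(struct task_struct, thread).
         */
        pr_info("thread_struct whitelist: offset=%lu size=%lu\n",
                offset, size);
    }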
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -458,6 +458,21 @@ static void set_max_threads(unsigned int max_threads_suggested)
 int arch_task_struct_size __read_mostly;
 #endif
 
+static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
+{
+	/* Fetch thread_struct whitelist for the architecture. */
+	arch_thread_struct_whitelist(offset, size);
+
+	/*
+	 * Handle zero-sized whitelist or empty thread_struct, otherwise
+	 * adjust offset to position of thread_struct in task_struct.
+	 */
+	if (unlikely(*size == 0))
+		*offset = 0;
+	else
+		*offset += offsetof(struct task_struct, thread);
+}
+
 void __init fork_init(void)
 {
 	int i;
@@ -466,11 +481,14 @@ void __init fork_init(void)
 #define ARCH_MIN_TASKALIGN	0
 #endif
 	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
+	unsigned long useroffset, usersize;
 
 	/* create a slab on which task_structs can be allocated */
-	task_struct_cachep = kmem_cache_create("task_struct",
+	task_struct_whitelist(&useroffset, &usersize);
+	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
 			arch_task_struct_size, align,
-			SLAB_PANIC|SLAB_ACCOUNT, NULL);
+			SLAB_PANIC|SLAB_ACCOUNT,
+			useroffset, usersize, NULL);
 #endif
 
 	/* do the arch specific task caches init */
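
For context on the new allocator call, a minimal usage sketch of
kmem_cache_create_usercopy() applied to a made-up object (struct foo and
foo_cachep are illustrative names, not from this patch); only the region
named by the useroffset/usersize pair is permitted in copies to or from
userspace under hardened usercopy:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    /* Illustrative object: only user_visible should ever reach userspace. */
    struct foo {
        unsigned long internal_state;
        char user_visible[64];
        void *internal_ptr;
    };

    static struct kmem_cache *foo_cachep;

    static int __init foo_cache_init(void)
    {
        /*
         * Same call pattern as the task_struct conversion above: the two
         * arguments before the constructor give the whitelisted region's
         * offset and size within each object.
         */
        foo_cachep = kmem_cache_create_usercopy("foo",
                sizeof(struct foo), 0, 0,
                offsetof(struct foo, user_visible),
                sizeof(((struct foo *)0)->user_visible),
                NULL);
        return foo_cachep ? 0 : -ENOMEM;
    }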