	x86: support user address masking instead of non-speculative conditional
The Spectre-v1 mitigations made "access_ok()" much more expensive, since
it has to serialize execution with the test for a valid user address.
All the normal user copy routines avoid this by just masking the user
address with a data-dependent mask instead, but the fast
"unsafe_user_read()" kind of patterms that were supposed to be a fast
case got slowed down.
This introduces a notion of using
	src = masked_user_access_begin(src);
to do the user address sanity check using a data-dependent mask instead
of the more traditional conditional
more traditional conditional
	if (user_read_access_begin(src, len)) {
model.
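As an illustration only (not part of this commit), here is the shape of
the two models with the kernel primitives stubbed out so the snippet
compiles standalone; the stubs, the struct and the function names are
invented for this sketch, and the real definitions live in
<linux/uaccess.h> and the arch headers:

	#include <stddef.h>

	struct argpack { void *p; size_t size; };

	/* Stubs standing in for the kernel primitives. */
	static int user_read_access_begin(const void *p, size_t len)
	{ (void)p; (void)len; return 1; }
	static void user_read_access_end(void) { }
	static const struct argpack *
	masked_user_access_begin(const struct argpack *p) { return p; }
	#define unsafe_get_user(dst, ptr, label) ((dst) = *(ptr))
	#define EFAULT 14

	/* Conditional model: test the range (with fencing), then access. */
	static int get_conditional(struct argpack *to, const struct argpack *from)
	{
		if (!user_read_access_begin(from, sizeof(*from)))
			return -EFAULT;
		unsafe_get_user(to->p, &from->p, Efault);
		unsafe_get_user(to->size, &from->size, Efault);
		user_read_access_end();
		return 0;
	}

	/* Masked model: no conditional; a bad pointer is rewritten so
	 * the dense accesses below are guaranteed to fault. */
	static int get_masked(struct argpack *to, const struct argpack *from)
	{
		from = masked_user_access_begin(from);
		unsafe_get_user(to->p, &from->p, Efault);
		unsafe_get_user(to->size, &from->size, Efault);
		user_read_access_end();
		return 0;
	}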
This model only works for dense accesses that start at 'src' and on
architectures that have a guard region that is guaranteed to fault in
between the user space and the kernel space area.
With this, the user access doesn't need to be manually checked, because
a bad address is guaranteed to fault (by some architecture masking
trick: on x86-64 this involves just turning an invalid user address into
all ones, since we don't map the top of address space).
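The arithmetic of that trick can be demonstrated in plain user space;
this is just an illustration with made-up addresses, mirroring the new
mask_user_address() macro:

	#include <stdio.h>
	#include <stdint.h>

	/* Same expression as mask_user_address(): OR the address with
	 * its own sign bit smeared across all 64 bits. */
	static uint64_t mask_user_address(uint64_t x)
	{
		return x | (uint64_t)((int64_t)x >> 63);
	}

	int main(void)
	{
		uint64_t user   = 0x00007fffdeadbeefULL; /* valid user pointer */
		uint64_t kernel = 0xffff888012345678ULL; /* kernel pointer */

		/* Prints the user address unchanged... */
		printf("%016llx\n", (unsigned long long)mask_user_address(user));
		/* ...and all ones for the kernel address, which is never
		 * mapped and therefore faults when dereferenced. */
		printf("%016llx\n", (unsigned long long)mask_user_address(kernel));
		return 0;
	}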
This only converts a couple of examples for now.  Example x86-64 code
generation for loading two words from user space:
        stac                    # open the user-access window (SMAP)
        mov    %rax,%rcx
        sar    $0x3f,%rcx       # smear the sign bit across all 64 bits
        or     %rax,%rcx        # valid address unchanged, kernel address -> all ones
        mov    (%rcx),%r13      # first word; faults on an all-ones address
        mov    0x8(%rcx),%r14   # second word
        clac                    # close the user-access window
where all the error handling and -EFAULT is now purely handled out of
line by the exception path.
Of course, if the micro-architecture does badly at 'clac' and 'stac',
the above is still pitifully slow.  But at least we did as well as we
could.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
commit 2865baf540
parent 0c38364824

5 changed files with 36 additions and 1 deletion
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -53,6 +53,14 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
  */
 #define valid_user_address(x) ((__force long)(x) >= 0)
 
+/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+#define masked_user_access_begin(x) ({ __uaccess_begin(); mask_user_address(x); })
+
 /*
  * User pointers can have tag bits on x86-64.  This scheme tolerates
  * arbitrary values in those bits rather then masking them off.
--- a/fs/select.c
+++ b/fs/select.c
@@ -780,7 +780,9 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
 {
 	// the path is hot enough for overhead of copy_from_user() to matter
 	if (from) {
-		if (!user_read_access_begin(from, sizeof(*from)))
+		if (can_do_masked_user_access())
+			from = masked_user_access_begin(from);
+		else if (!user_read_access_begin(from, sizeof(*from)))
 			return -EFAULT;
 		unsafe_get_user(to->p, &from->p, Efault);
 		unsafe_get_user(to->size, &from->size, Efault);
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -32,6 +32,13 @@
 })
 #endif
 
+#ifdef masked_user_access_begin
+ #define can_do_masked_user_access() 1
+#else
+ #define can_do_masked_user_access() 0
+ #define masked_user_access_begin(src) NULL
+#endif
+
 /*
  * Architectures should provide two primitives (raw_copy_{to,from}_user())
  * and get rid of their private instances of copy_{to,from}_user() and
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 	if (unlikely(count <= 0))
 		return 0;
 
+	if (can_do_masked_user_access()) {
+		long retval;
+
+		src = masked_user_access_begin(src);
+		retval = do_strncpy_from_user(dst, src, count, count);
+		user_read_access_end();
+		return retval;
+	}
+
 	max_addr = TASK_SIZE_MAX;
 	src_addr = (unsigned long)untagged_addr(src);
 	if (likely(src_addr < max_addr)) {
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
 	if (unlikely(count <= 0))
 		return 0;
 
+	if (can_do_masked_user_access()) {
+		long retval;
+
+		str = masked_user_access_begin(str);
+		retval = do_strnlen_user(str, count, count);
+		user_read_access_end();
+		return retval;
+	}
+
 	max_addr = TASK_SIZE_MAX;
 	src_addr = (unsigned long)untagged_addr(str);
 	if (likely(src_addr < max_addr)) {