	x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()
Use the updated memcpy_mcsafe() implementation to define
copy_to_user_mcsafe() and copy_to_iter_mcsafe(). The most significant
difference from typical copy_to_iter() is that the ITER_KVEC and
ITER_BVEC iterator types can fail to complete a full transfer.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: hch@lst.de
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-nvdimm@lists.01.org
Link: http://lkml.kernel.org/r/152539239150.31796.9189779163576449784.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 12c89130a5
commit 8780356ef6

4 changed files with 88 additions and 0 deletions
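The short-copy semantics called out in the message above invert the usual contract: copy_to_iter() always completes KVEC and BVEC transfers, while copy_to_iter_mcsafe() may return fewer bytes than requested when the source contains poisoned (machine-check) memory. A minimal sketch of a caller consuming that return value; the function name and error policy here are hypothetical, not part of this commit:

#include <linux/uio.h>
#include <linux/errno.h>

/*
 * Hypothetical pmem read path: copy @len bytes from the kernel
 * mapping @kaddr into the buffers described by @iter. A short copy
 * means the uncopied remainder of the source hit a machine check,
 * so report it as a media error rather than retrying.
 */
static ssize_t example_pmem_read(void *kaddr, size_t len, struct iov_iter *iter)
{
	size_t copied = copy_to_iter_mcsafe(kaddr, len, iter);

	if (copied < len)
		return -EIO;
	return copied;
}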
arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
 	select ARCH_HAS_PMEM_API		if X86_64
 	select ARCH_HAS_REFCOUNT
 	select ARCH_HAS_UACCESS_FLUSHCACHE	if X86_64
+	select ARCH_HAS_UACCESS_MCSAFE		if X86_64
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_STRICT_KERNEL_RWX
arch/x86/include/asm/uaccess_64.h
@@ -46,6 +46,17 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	return ret;
 }
 
+static __always_inline __must_check unsigned long
+copy_to_user_mcsafe(void *to, const void *from, unsigned len)
+{
+	unsigned long ret;
+
+	__uaccess_begin();
+	ret = memcpy_mcsafe(to, from, len);
+	__uaccess_end();
+	return ret;
+}
+
 static __always_inline __must_check unsigned long
 raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
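copy_to_user_mcsafe() inherits memcpy_mcsafe()'s return convention, zero on success or the number of bytes left uncopied when a machine check fires, and brackets the copy with __uaccess_begin()/__uaccess_end() to open and close user-space access (STAC/CLAC on SMAP-capable CPUs). A hedged sketch of a caller acting on that count; the helper below is hypothetical:

#include <linux/uaccess.h>
#include <linux/printk.h>

/* Hypothetical helper: report how many bytes actually reached user space. */
static unsigned long example_copyout(void __user *udst, const void *src,
		unsigned int len)
{
	/* The __force cast mirrors copyout_mcsafe() in lib/iov_iter.c below. */
	unsigned long rem = copy_to_user_mcsafe((__force void *)udst, src, len);

	if (rem)
		pr_warn("poison left %lu of %u bytes uncopied\n", rem, len);
	return len - rem;
}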
include/linux/uio.h
@@ -154,6 +154,12 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
 #define _copy_from_iter_flushcache _copy_from_iter_nocache
 #endif
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+size_t _copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i);
+#else
+#define _copy_to_iter_mcsafe _copy_to_iter
+#endif
+
 static __always_inline __must_check
 size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
@@ -163,6 +169,15 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 		return _copy_from_iter_flushcache(addr, bytes, i);
 }
 
+static __always_inline __must_check
+size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return 0;
+	else
+		return _copy_to_iter_mcsafe(addr, bytes, i);
+}
+
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
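Note the #else branch: on architectures without ARCH_HAS_UACCESS_MCSAFE, _copy_to_iter_mcsafe simply aliases _copy_to_iter, so callers can use the mcsafe name unconditionally and without ifdefs of their own. A sketch of that portable pattern, with a hypothetical function name:

#include <linux/uio.h>

/*
 * Hypothetical portable helper: on x86_64 (which now selects
 * ARCH_HAS_UACCESS_MCSAFE per the Kconfig hunk above) poison surfaces
 * as a short copy; elsewhere this degrades to an ordinary copy that
 * cannot return short for KVEC/BVEC targets.
 */
static size_t example_copy_out(void *src, size_t len, struct iov_iter *iter)
{
	return copy_to_iter_mcsafe(src, len, iter);
}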
lib/iov_iter.c
@@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_to_iter);
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = copy_to_user_mcsafe((__force void *) to, from, n);
+	}
+	return n;
+}
+
+static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+		const char *from, size_t len)
+{
+	unsigned long ret;
+	char *to;
+
+	to = kmap_atomic(page);
+	ret = memcpy_mcsafe(to + offset, from, len);
+	kunmap_atomic(to);
+
+	return ret;
+}
+
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+{
+	const char *from = addr;
+	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return 0;
+	}
+	if (iter_is_iovec(i))
+		might_fault();
+	iterate_and_advance(i, bytes, v,
+		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+		({
+		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+                               (from += v.bv_len) - v.bv_len, v.bv_len);
+		if (rem) {
+			curr_addr = (unsigned long) from;
+			bytes = curr_addr - s_addr - rem;
+			return bytes;
+		}
+		}),
+		({
+		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+				v.iov_len);
+		if (rem) {
+			curr_addr = (unsigned long) from;
+			bytes = curr_addr - s_addr - rem;
+			return bytes;
+		}
+		})
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
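The accounting on the error paths rewards a worked example: `from` is advanced past each segment before the copy result is checked, so curr_addr - s_addr counts every byte attempted so far, and subtracting rem (what memcpy_mcsafe() left uncopied in the faulting segment) gives the bytes that actually landed. With hypothetical numbers:

/*
 * Two-segment BVEC iterator, 4096 bytes per segment; poison strikes in
 * the second segment with rem = 512 bytes still uncopied:
 *
 *	s_addr    = (unsigned long) addr
 *	from      = addr + 4096 + 4096         (already advanced)
 *	curr_addr = (unsigned long) from
 *	bytes     = curr_addr - s_addr - rem   = 8192 - 512 = 7680
 *
 * _copy_to_iter_mcsafe() returns 7680: exactly the bytes copied before
 * the machine check, all of which the caller may treat as valid.
 */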