mirror of https://github.com/torvalds/linux.git
	x86/uaccess: Provide untagged_addr() and remove tags before address check
untagged_addr() is a helper used by the core-mm to strip tag bits and
bring the address to its canonical shape according to the rules of the
current thread. It only handles userspace addresses.

The untagging mask is stored in a per-CPU variable and is set when
context-switching to the task.

The tags must not be included in the check of whether it is okay to
access the userspace address. Strip the tags in access_ok().

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Alexander Potapenko <glider@google.com>
Link: https://lore.kernel.org/all/20230312112612.31869-7-kirill.shutemov%40linux.intel.com
commit 74c228d20a
parent 428e106ae1

6 changed files with 69 additions and 2 deletions
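For illustration only: the stand-alone sketch below mimics the untagging arithmetic in user-space C for a LAM_U57-style layout (tag in bits 62:57, bit 63 left alone as the user/kernel sign bit). LAM_U57_UNTAG_MASK, demo_untag() and the sample pointer values are invented for this example and are not part of the patch; in the kernel the mask lives in mm->context.untag_mask and the tlbstate_untag_mask per-CPU variable introduced below.

/* Hypothetical stand-alone demo of the untagging arithmetic. */
#include <stdint.h>
#include <stdio.h>

/* Assumed LAM_U57-style layout: bits 62:57 carry the tag. */
#define LAM_U57_UNTAG_MASK	(~UINT64_C(0x7e00000000000000))

static uint64_t demo_untag(uint64_t addr, uint64_t mask)
{
	/*
	 * Sign-extend bit 63: 0 for user addresses, all-ones for the
	 * kernel half, so OR-ing it into the mask leaves kernel
	 * addresses untouched while stripping user tag bits.
	 */
	uint64_t sign = (uint64_t)((int64_t)addr >> 63);

	return addr & (mask | sign);
}

int main(void)
{
	uint64_t tagged_user = UINT64_C(0x3a00007f12345678);	/* tag 0x1d in bits 62:57 */
	uint64_t kernel_addr = UINT64_C(0xffff888012345678);

	printf("user:   %#llx -> %#llx\n",
	       (unsigned long long)tagged_user,
	       (unsigned long long)demo_untag(tagged_user, LAM_U57_UNTAG_MASK));
	printf("kernel: %#llx -> %#llx\n",
	       (unsigned long long)kernel_addr,
	       (unsigned long long)demo_untag(kernel_addr, LAM_U57_UNTAG_MASK));
	return 0;
}

With this mask the tagged user pointer untags to 0x7f12345678, while the kernel address passes through unchanged, which is the property the access_ok() change below relies on.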
				
			
@@ -45,6 +45,9 @@ typedef struct {
 #ifdef CONFIG_ADDRESS_MASKING
 	/* Active LAM mode:  X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
 	unsigned long lam_cr3_mask;
+
+	/* Significant bits of the virtual address. Excludes tag bits. */
+	u64 untag_mask;
 #endif
 
 	struct mutex lock;

@@ -101,6 +101,12 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
 static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
+	mm->context.untag_mask = oldmm->context.untag_mask;
+}
+
+static inline void mm_reset_untag_mask(struct mm_struct *mm)
+{
+	mm->context.untag_mask = -1UL;
 }
 
 #else

@@ -113,6 +119,10 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
 static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 }
+
+static inline void mm_reset_untag_mask(struct mm_struct *mm)
+{
+}
 #endif
 
 #define enter_lazy_tlb enter_lazy_tlb

@@ -139,6 +149,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.execute_only_pkey = -1;
 	}
 #endif
+	mm_reset_untag_mask(mm);
 	init_new_context_ldt(mm);
 	return 0;
 }
@@ -54,6 +54,15 @@ static inline void cr4_clear_bits(unsigned long mask)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_ADDRESS_MASKING
+DECLARE_PER_CPU(u64, tlbstate_untag_mask);
+
+static inline u64 current_untag_mask(void)
+{
+	return this_cpu_read(tlbstate_untag_mask);
+}
+#endif
+
 #ifndef MODULE
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in two cache

@@ -380,6 +389,7 @@ static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
 {
 	this_cpu_write(cpu_tlbstate.lam,
 		       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
+	this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
 }
 
 #else
@@ -7,11 +7,13 @@
 #include <linux/compiler.h>
 #include <linux/instrumented.h>
 #include <linux/kasan-checks.h>
+#include <linux/mm_types.h>
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
 #include <asm/extable.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline bool pagefault_disabled(void);

@@ -21,6 +23,39 @@ static inline bool pagefault_disabled(void);
 # define WARN_ON_IN_IRQ()
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+/*
+ * Mask out tag bits from the address.
+ *
+ * Magic with the 'sign' allows to untag userspace pointer without any branches
+ * while leaving kernel addresses intact.
+ */
+static inline unsigned long __untagged_addr(unsigned long addr,
+					    unsigned long mask)
+{
+	long sign = addr >> 63;
+
+	addr &= mask | sign;
+	return addr;
+}
+
+#define untagged_addr(addr)	({					\
+	u64 __addr = (__force u64)(addr);				\
+	__addr = __untagged_addr(__addr, current_untag_mask());	\
+	(__force __typeof__(addr))__addr;				\
+})
+
+#define untagged_addr_remote(mm, addr)	({				\
+	u64 __addr = (__force u64)(addr);				\
+	mmap_assert_locked(mm);						\
+	__addr = __untagged_addr(__addr, (mm)->context.untag_mask);	\
+	(__force __typeof__(addr))__addr;				\
+})
+
+#else
+#define untagged_addr(addr)	(addr)
+#endif
+
 /**
  * access_ok - Checks if a user space pointer is valid
  * @addr: User space pointer to start of block to check

@@ -41,7 +76,7 @@ static inline bool pagefault_disabled(void);
 #define access_ok(addr, size)						\
 ({									\
 	WARN_ON_IN_IRQ();						\
-	likely(__access_ok(addr, size));				\
+	likely(__access_ok(untagged_addr(addr), size));			\
 })
 
 #include <asm-generic/access_ok.h>
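A note on the macro form in the hunks above: the GNU C statement expression together with the __typeof__ cast is what lets untagged_addr() accept either an unsigned long or a user pointer and hand back the same type. Below is a minimal, hypothetical user-space sketch of that pattern; demo_untagged_addr(), demo_current_mask() and the sample pointer are invented for illustration and are not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for current_untag_mask(): a LAM_U57-style mask. */
static uint64_t demo_current_mask(void)
{
	return ~UINT64_C(0x7e00000000000000);
}

/*
 * Same shape as the kernel macro: strip the tag, then cast back to the
 * caller's original type via __typeof__ (GNU C extensions).
 */
#define demo_untagged_addr(addr) ({					\
	uint64_t __addr = (uint64_t)(addr);				\
	__addr &= demo_current_mask() |					\
		  (uint64_t)((int64_t)__addr >> 63);			\
	(__typeof__(addr))__addr;					\
})

int main(void)
{
	char *tagged = (char *)UINT64_C(0x3a00007f12345678);
	char *clean = demo_untagged_addr(tagged);	/* still a char * */

	printf("%p -> %p\n", (void *)tagged, (void *)clean);
	return 0;
}

untagged_addr_remote() follows the same pattern but reads the mask from the target mm (asserting that mmap_lock is held) rather than from the current CPU's tlbstate_untag_mask copy.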
@@ -48,6 +48,7 @@
 #include <asm/frame.h>
 #include <asm/unwind.h>
 #include <asm/tdx.h>
+#include <asm/mmu_context.h>
 
 #include "process.h"
 

@@ -368,6 +369,8 @@ void arch_setup_new_exec(void)
 		task_clear_spec_ssb_noexec(current);
 		speculation_ctrl_update(read_thread_flags());
 	}
+
+	mm_reset_untag_mask(current->mm);
 }
 
 #ifdef CONFIG_X86_IOPL_IOPERM
@@ -1048,6 +1048,11 @@ __visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
 };
 
+#ifdef CONFIG_ADDRESS_MASKING
+DEFINE_PER_CPU(u64, tlbstate_untag_mask);
+EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
+#endif
+
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
 	/* entry 0 MUST be WB (hardwired to speed up translations) */