arm uses a top-down mmap layout by default that exactly fits the generic functions, so get rid of arch specific code and use the generic version by selecting ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT.

As ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT selects ARCH_HAS_ELF_RANDOMIZE, use the generic version of arch_randomize_brk since it also fits.

Note that this commit also removes the possibility for arm to have elf randomization and no MMU: without MMU, the security added by randomization is worth nothing.

Note that it is safe to remove STACK_RND_MASK since it matches the default value.

Link: http://lkml.kernel.org/r/20190730055113.23635-9-alex@ghiti.fr
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Acked-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Hogan <jhogan@kernel.org>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
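
For context, a rough sketch of the generic brk randomization that arm picks up once it selects ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT (and, through it, ARCH_HAS_ELF_RANDOMIZE). This is paraphrased from the common mm code this series consolidates, not a verbatim copy of the upstream source; ARM's old STACK_RND_MASK of (0x7ff >> (PAGE_SHIFT - 12)) matched the generic default, which is why dropping it is a no-op.

#include <linux/mm.h>		/* struct mm_struct, randomize_page() */
#include <linux/compat.h>	/* is_compat_task() */
#include <linux/sizes.h>	/* SZ_32M, SZ_1G */

/*
 * Sketch of the generic arch_randomize_brk(): nudge the heap start by a
 * random, page-aligned offset; 32-bit tasks get a smaller window (32 MiB)
 * than 64-bit ones (1 GiB).
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

On arm (32-bit only), the first branch is always taken, so the behaviour matches what the removed arch-specific code provided.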
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
							/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
							/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}

#define INIT_THREAD  {	}

#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()						\
	do {							\
		smp_mb();					\
		__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;");	\
	} while (0)
#else
#define cpu_relax()			barrier()
#endif

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.long	9998b\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - only ARMv5.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif

#endif

#endif /* __ASM_ARM_PROCESSOR_H */