Some of these functions have grown beyond inline sanity, move them out-of-line.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Requested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
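For reference, this is the caller-side pattern the generic mmu_gather API below implies (an illustrative sketch only, not code from this commit; example_unmap_range() and the elided page-table walk are hypothetical):

	void example_unmap_range(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		/* Begin gathering; false = not a full-mm teardown. */
		tlb_gather_mmu(&tlb, mm, false);

		/*
		 * Walk the page tables for [start, end); for each present
		 * entry the caller would do something like:
		 *
		 *	tlb_remove_tlb_entry(&tlb, ptep, addr);
		 *	tlb_remove_page(&tlb, page);
		 *
		 * tlb_remove_page() flushes on its own when a batch fills.
		 */

		/* Flush the TLBs, then free all gathered pages. */
		tlb_finish_mmu(&tlb, start, end);
	}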
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
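/*
 * Illustrative sketch (not part of the original header): an architecture
 * selecting CONFIG_HAVE_RCU_TABLE_FREE would route its page-table frees
 * through tlb_remove_table() instead of freeing the pages directly, for
 * example from its pte_free_tlb() hook. The exact wrapping below is
 * hypothetical; real architectures pass their own table token:
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 *
 * tlb_remove_table() then batches the table and frees it only after a
 * sched-RCU grace period (or via the single-table fallback when batch
 * storage cannot be allocated).
 */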

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
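/*
 * Worked example (illustrative, assuming a 64-bit arch with 4 KiB pages):
 * the header above is 16 bytes (one pointer plus two unsigned ints), so a
 * full batch page holds (4096 - 16) / 8 = 510 page pointers.
 */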

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned int		need_flush : 1,	/* Did free PTEs */
				fast_mode  : 1; /* No batching   */

	unsigned int		fullmm;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
};

#define HAVE_GENERIC_MMU_GATHER

static inline int tlb_fast_mode(struct mmu_gather *tlb)
{
#ifdef CONFIG_SMP
	return tlb->fast_mode;
#else
	/*
	 * For UP we don't need to worry about TLB flush
	 * and page free order so much..
	 */
	return 1;
#endif
}

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
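/*
 * Illustrative sketch (not from this file): an architecture that flushes by
 * virtual address range could implement __tlb_remove_tlb_entry() to record
 * the span being unmapped; the range_start/range_end fields here are
 * hypothetical:
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address)		\
 *		do {							\
 *			(tlb)->range_start = min((tlb)->range_start,	\
 *						 (address));		\
 *			(tlb)->range_end = max((tlb)->range_end,	\
 *					       (address) + PAGE_SIZE);	\
 *		} while (0)
 *
 * Architectures that need no per-entry bookkeeping define it as a no-op and
 * rely on the invalidate done by tlb_flush_mmu()/tlb_finish_mmu().
 */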

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */