mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 02:30:34 +02:00

	mmu_gather: move minimal range calculations into generic code
On architectures with hardware broadcasting of TLB invalidation messages,
it makes sense to reduce the range of the mmu_gather structure when
unmapping page ranges based on the dirty address information passed to
tlb_remove_tlb_entry. arm64 already does this by directly manipulating
the start/end fields of the gather structure, but this confuses the
generic code which does not expect these fields to change and can end up
calculating invalid, negative ranges when forcing a flush in
zap_pte_range.

This patch moves the minimal range calculation out of the arm64 code and
into the generic implementation, simplifying zap_pte_range in the process
(which no longer needs to care about start/end, since they will point to
the appropriate ranges already). With the range being tracked by core
code, the need_flush flag is dropped in favour of checking that the end
of the range has actually been set.

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Michal Simek <monstr@monstr.eu>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
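As a quick illustration of the range tracking this patch centralises, here
is a minimal userspace sketch (not kernel code): an "empty" range is encoded
as start = TASK_SIZE, end = 0, each unmapped page widens the range, and a
non-zero end replaces the old need_flush flag. The demo struct, the
TASK_SIZE_DEMO constant and main() are illustrative stand-ins, not kernel
interfaces.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE_DEMO	(1UL << 47)	/* stand-in for TASK_SIZE */

struct mmu_gather_demo {
	unsigned long start;
	unsigned long end;		/* end == 0: nothing to flush */
};

/* Grow the pending-flush range to cover one more page. */
static void __tlb_adjust_range_demo(struct mmu_gather_demo *tlb,
				    unsigned long address)
{
	if (address < tlb->start)
		tlb->start = address;
	if (address + PAGE_SIZE > tlb->end)
		tlb->end = address + PAGE_SIZE;
}

/* Reset to the "empty" encoding: start at the top, end at zero. */
static void __tlb_reset_range_demo(struct mmu_gather_demo *tlb)
{
	tlb->start = TASK_SIZE_DEMO;
	tlb->end = 0;
}

int main(void)
{
	struct mmu_gather_demo tlb;

	__tlb_reset_range_demo(&tlb);
	__tlb_adjust_range_demo(&tlb, 0x7000);
	__tlb_adjust_range_demo(&tlb, 0x3000);

	/* need_flush is redundant: a non-zero end already encodes it. */
	if (tlb.end)
		printf("flush [%#lx, %#lx)\n", tlb.start, tlb.end);
	return 0;
}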
This commit is contained in:

parent 63648dd20f
commit fb7332a9fe

7 changed files with 63 additions and 100 deletions
arch/arm64/include/asm/tlb.h

@@ -19,10 +19,6 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
-#define  __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry
-
-#include <asm-generic/tlb.h>
-
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 
@@ -37,71 +33,22 @@ static inline void __tlb_remove_table(void *_table)
 #define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
-/*
- * There's three ways the TLB shootdown code is used:
- *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
- *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- *  2. Unmapping all vmas.  See exit_mmap().
- *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- *     Page tables will be freed.
- *  3. Unmapping argument pages.  See shift_arg_pages().
- *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- */
+#include <asm-generic/tlb.h>
+
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	if (tlb->fullmm) {
 		flush_tlb_mm(tlb->mm);
-	} else if (tlb->end > 0) {
+	} else {
 		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
 		flush_tlb_range(&vma, tlb->start, tlb->end);
-		tlb->start = TASK_SIZE;
-		tlb->end = 0;
 	}
 }
 
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
-	if (!tlb->fullmm) {
-		tlb->start = min(tlb->start, addr);
-		tlb->end = max(tlb->end, addr + PAGE_SIZE);
-	}
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-					  unsigned long addr)
-{
-	tlb_add_flush(tlb, addr);
-}
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush.  When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void tlb_start_vma(struct mmu_gather *tlb,
-				 struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm) {
-		tlb->start = TASK_SIZE;
-		tlb->end = 0;
-	}
-}
-
-static inline void tlb_end_vma(struct mmu_gather *tlb,
-			       struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm)
-		tlb_flush(tlb);
-}
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
 	pgtable_page_dtor(pte);
-	tlb_add_flush(tlb, addr);
 	tlb_remove_entry(tlb, pte);
 }
 
@@ -109,7 +56,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
-	tlb_add_flush(tlb, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -118,15 +64,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
-	tlb_add_flush(tlb, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
 
-static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
-						unsigned long address)
-{
-	tlb_add_flush(tlb, address);
-}
-
 #endif
arch/microblaze/include/asm/tlb.h

@@ -14,7 +14,6 @@
 #define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)
 
 #include <linux/pagemap.h>
-#include <asm-generic/tlb.h>
 
 #ifdef CONFIG_MMU
 #define tlb_start_vma(tlb, vma)		do { } while (0)
@@ -22,4 +21,6 @@
 #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
 #endif
 
+#include <asm-generic/tlb.h>
+
 #endif /* _ASM_MICROBLAZE_TLB_H */
arch/powerpc/include/asm/pgalloc.h

@@ -3,7 +3,6 @@
 #ifdef __KERNEL__
 
 #include <linux/mm.h>
-#include <asm-generic/tlb.h>
 
 #ifdef CONFIG_PPC_BOOK3E
 extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
@@ -14,6 +13,8 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
 }
 #endif /* !CONFIG_PPC_BOOK3E */
 
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
 #ifdef CONFIG_PPC64
 #include <asm/pgalloc-64.h>
 #else
arch/powerpc/include/asm/tlb.h

@@ -27,6 +27,7 @@
 
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)
+#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
 
 extern void tlb_flush(struct mmu_gather *tlb);
 
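The `#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry` line above is the
override idiom this patch leans on: an arch header defines the macro name to
itself so that the generic header's `#ifndef __tlb_remove_tlb_entry` fallback
(added below in asm-generic/tlb.h) is compiled out. A hypothetical standalone
C sketch of the idiom, with illustrative names only:

#include <stdio.h>

/* "arch" header side: a real hook plus a marker #define of the name to
 * itself, so the generic fallback below sees it as already present. */
static void __demo_remove_tlb_entry(int pte)
{
	printf("arch hook called for pte %d\n", pte);
}
#define __demo_remove_tlb_entry __demo_remove_tlb_entry

/* "generic" header side: install a no-op only when no arch hook exists. */
#ifndef __demo_remove_tlb_entry
#define __demo_remove_tlb_entry(pte) do { } while (0)
#endif

int main(void)
{
	__demo_remove_tlb_entry(42);	/* resolves to the arch hook */
	return 0;
}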
arch/powerpc/mm/hugetlbpage.c

@@ -517,8 +517,6 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	for (i = 0; i < num_hugepd; i++, hpdp++)
 		hpdp->pd = 0;
 
-	tlb->need_flush = 1;
-
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	hugepd_free(tlb, hugepte);
 #else
include/asm-generic/tlb.h

@@ -96,10 +96,9 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	unsigned int		need_flush : 1,	/* Did free PTEs */
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-				fullmm : 1,
+	unsigned int		fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 		tlb_flush_mmu(tlb);
 }
 
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+				      unsigned long address)
+{
+	tlb->start = min(tlb->start, address);
+	tlb->end = max(tlb->end, address + PAGE_SIZE);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+	tlb->start = TASK_SIZE;
+	tlb->end = 0;
+}
+
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush.  When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+#ifndef tlb_start_vma
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#endif
+
+#define __tlb_end_vma(tlb, vma)					\
+	do {							\
+		if (!tlb->fullmm && tlb->end) {			\
+			tlb_flush(tlb);				\
+			__tlb_reset_range(tlb);			\
+		}						\
+	} while (0)
+
+#ifndef tlb_end_vma
+#define tlb_end_vma	__tlb_end_vma
+#endif
+
+#ifndef __tlb_remove_tlb_entry
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#endif
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate.   This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate.   This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 	} while (0)
 
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 
mm/memory.c

@@ -220,9 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 	/* Is it from 0 to ~0? */
 	tlb->fullmm     = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start	= start;
-	tlb->end	= end;
-	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
 	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -232,15 +229,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+
+	__tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	tlb->need_flush = 0;
+	if (!tlb->end)
+		return;
+
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+	__tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -256,8 +258,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
 	tlb_flush_mmu_tlbonly(tlb);
 	tlb_flush_mmu_free(tlb);
 }
@@ -292,7 +292,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	struct mmu_gather_batch *batch;
 
-	VM_BUG_ON(!tlb->need_flush);
+	VM_BUG_ON(!tlb->end);
 
 	batch = tlb->active;
 	batch->pages[batch->nr++] = page;
@@ -359,8 +359,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
-	tlb->need_flush = 1;
-
 	/*
 	 * When there's less then two users of this mm there cannot be a
 	 * concurrent page-table walk.
@@ -1185,20 +1183,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_leave_lazy_mmu_mode();
 
 	/* Do the actual TLB flush before dropping ptl */
-	if (force_flush) {
-		unsigned long old_end;
-
-		/*
-		 * Flush the TLB just for the previous segment,
-		 * then update the range to be the remaining
-		 * TLB range.
-		 */
-		old_end = tlb->end;
-		tlb->end = addr;
+	if (force_flush)
 		tlb_flush_mmu_tlbonly(tlb);
-		tlb->start = addr;
-		tlb->end = old_end;
-	}
 	pte_unmap_unlock(start_pte, ptl);
 
 	/*
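The zap_pte_range hunk above shows the payoff: the open-coded save/restore of
tlb->start/end disappears because tlb_flush_mmu_tlbonly() now skips empty
ranges and resets the range after flushing, leaving the gather ready for the
remaining pages. A minimal userspace sketch of that behaviour, with
illustrative stand-in names only:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE_DEMO	(1UL << 47)

struct gather { unsigned long start, end; };

static void reset_range(struct gather *g)
{
	g->start = TASK_SIZE_DEMO;
	g->end = 0;
}

static void adjust_range(struct gather *g, unsigned long addr)
{
	if (addr < g->start)
		g->start = addr;
	if (addr + PAGE_SIZE > g->end)
		g->end = addr + PAGE_SIZE;
}

/* Mirrors tlb_flush_mmu_tlbonly(): a no-op when the range is empty,
 * and resets the range after a real flush. */
static void flush_tlbonly(struct gather *g)
{
	if (!g->end)
		return;
	printf("flush [%#lx, %#lx)\n", g->start, g->end);
	reset_range(g);
}

int main(void)
{
	struct gather g;

	reset_range(&g);
	adjust_range(&g, 0x1000);	/* unmap some ptes ... */
	flush_tlbonly(&g);		/* force_flush: flushes and resets */
	adjust_range(&g, 0x5000);	/* remaining ptes start a new range */
	flush_tlbonly(&g);
	flush_tlbonly(&g);		/* empty range: optimised away */
	return 0;
}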