	mmu_gather: Let there be one tlb_{start,end}_vma() implementation
Now that architectures are no longer allowed to override
tlb_{start,end}_vma(), rearrange the code so that there is only one
implementation of each of these functions.

This makes it much simpler to figure out what they actually do.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
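
For context, after this patch the generic header carries the only tlb_start_vma()/tlb_end_vma() implementations; the #ifndef tlb_start_vma / #ifndef tlb_end_vma override hooks are removed. The sketch below reconstructs the two helpers from the context lines of the diff further down; the pieces the hunks elide (the early returns, the exact name of the #ifndef guard around flush_cache_range(), and the wording of the comment above tlb_flush_mmu_tlbonly()) are filled in as assumptions rather than quoted from the file.

/* Sketch: the single remaining generic implementations (approximate). */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE	/* assumed name of the elided guard */
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/* Nothing to do per VMA for a full-mm flush or when VMAs are merged. */
	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
		return;

	/*
	 * Flush the TLB at VMA boundaries so the gathered range does not
	 * keep growing across unrelated VMAs.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}

The net effect is that an architecture can no longer redefine these functions; per-VMA flush behaviour is steered only through Kconfig switches such as CONFIG_MMU_GATHER_MERGE_VMAS, visible in the tlb_end_vma() early return above.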
			
			
parent 1d7708e75c
commit 18ba064e42

1 changed file with 2 additions and 13 deletions
include/asm-generic/tlb.h
@@ -349,8 +349,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 
 #ifdef CONFIG_MMU_GATHER_NO_RANGE
 
-#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
-#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
 #endif
 
 /*
@@ -370,17 +370,10 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void
 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 
-#define tlb_end_vma tlb_end_vma
-static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
 #else /* CONFIG_MMU_GATHER_NO_RANGE */
 
 #ifndef tlb_flush
 
-#if defined(tlb_start_vma) || defined(tlb_end_vma)
-#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
-#endif
-
 /*
  * When an architecture does not provide its own tlb_flush() implementation
  * but does have a reasonably efficient flush_vma_range() implementation
@@ -501,7 +494,6 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
  * case where we're doing a full MM flush.  When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
-#ifndef tlb_start_vma
 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (tlb->fullmm)
@@ -512,9 +504,7 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
 	flush_cache_range(vma, vma->vm_start, vma->vm_end);
 #endif
 }
-#endif
 
-#ifndef tlb_end_vma
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
@@ -528,7 +518,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
 	 */
 	tlb_flush_mmu_tlbonly(tlb);
 }
-#endif
 
 /*
  * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,