	mm: refactor TLB gathering API
This is a preparatory patch for solving race problems caused by TLB batching. To that end, we will increase/decrease the TLB flush pending count of mm_struct whenever tlb_[gather|finish]_mmu is called.

Before that, to keep things simple, this patch separates out the architecture-specific part, renames it to arch_tlb_[gather|finish]_mmu, and has the generic part just call it. It shouldn't change any behavior.

Link: http://lkml.kernel.org/r/20170802000818.4760-5-namit@vmware.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
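To make the resulting structure concrete, here is a minimal, self-contained userspace C model of the wrapper pattern this patch sets up. It is a sketch, not kernel code: the struct fields, the pending counter, and main() are illustrative stand-ins for the kernel's mmu_gather machinery, and the pending-count bookkeeping shown in the generic wrappers is what the follow-up patch in this series adds, not something this patch does.

/*
 * Illustrative model of the refactoring: the generic entry points
 * become thin wrappers around arch hooks, so that later bookkeeping
 * (e.g. a TLB-flush pending count on the mm) has exactly one place
 * to live. All names are hypothetical stand-ins.
 */
#include <stdio.h>

struct mm_struct { int tlb_flush_pending; };
struct mmu_gather { struct mm_struct *mm; unsigned long start, end; };

/* Architecture-specific part: what tlb_gather_mmu() used to be. */
static void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
}

static void arch_tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end)
{
	/* a real arch would flush its batched TLB entries here */
	(void)tlb; (void)start; (void)end;
}

/* Generic part: just calls the arch hook, but is now the single spot
 * where common bookkeeping can be added by a later patch. */
static void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	arch_tlb_gather_mmu(tlb, mm, start, end);
	mm->tlb_flush_pending++;	/* what the follow-up patch adds */
}

static void tlb_finish_mmu(struct mmu_gather *tlb,
			   unsigned long start, unsigned long end)
{
	arch_tlb_finish_mmu(tlb, start, end);
	tlb->mm->tlb_flush_pending--;
}

int main(void)
{
	struct mm_struct mm = { 0 };
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, &mm, 0, ~0UL);
	printf("pending during unmap: %d\n", mm.tlb_flush_pending); /* 1 */
	tlb_finish_mmu(&tlb, 0, ~0UL);
	printf("pending after finish: %d\n", mm.tlb_flush_pending); /* 0 */
	return 0;
}

The point of the split is that the generic entry points become the single place where mm-wide bookkeeping can live, while each architecture keeps its own gather/flush details behind the arch_* hooks.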
This commit is contained in:

parent a9b802500e
commit 56236a5955

 8 changed files with 54 additions and 25 deletions
				
			
arch/arm/include/asm/tlb.h:

@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->fullmm = !(start | (end+1));
@@ -166,7 +167,8 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+			unsigned long start, unsigned long end)
 {
 	tlb_flush_mmu(tlb);
 
arch/ia64/include/asm/tlb.h:

@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,7 +186,8 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+			unsigned long start, unsigned long end)
 {
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
arch/s390/include/asm/tlb.h:

@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-				  struct mm_struct *mm,
-				  unsigned long start,
-				  unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -76,8 +75,9 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-				  unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
 {
 	tlb_flush_mmu(tlb);
 }
arch/sh/include/asm/tlb.h:

@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -47,7 +48,8 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
 {
 	if (tlb->fullmm)
 		flush_tlb_mm(tlb->mm);
arch/um/include/asm/tlb.h:

@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -80,12 +81,13 @@ tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
 {
 	tlb_flush_mmu(tlb);
 
include/asm-generic/tlb.h:

@@ -112,10 +112,11 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+	struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
-							unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+			 unsigned long start, unsigned long end);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 				   int page_size);
 
include/linux/mm_types.h:

@@ -522,6 +522,12 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+				unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+				unsigned long start, unsigned long end);
+
 #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 /*
  * Memory barriers to keep this state in sync are graciously provided by
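A note on the hunk above: moving the declarations into linux/mm_types.h works because only a pointer to struct mmu_gather crosses this interface, so an opaque forward declaration is enough and callers need not pull in asm/tlb.h. A minimal stand-alone sketch of that C idiom, with all names hypothetical:

/* header-like portion: the type is only forward-declared, so anyone
 * who merely passes pointers around can include just this part. */
struct gather;				/* opaque: layout unknown here */
void gather_begin(struct gather *g);	/* fine: pointer to incomplete type */

/* implementation portion: only here is the full definition needed. */
#include <stdio.h>

struct gather { int pending; };

void gather_begin(struct gather *g)
{
	g->pending = 1;
}

int main(void)
{
	struct gather g;

	gather_begin(&g);
	printf("pending = %d\n", g.pending);
	return 0;
}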
mm/memory.c (28 changed lines):
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	return true;
 }
 
-/* tlb_gather_mmu
- *	Called to initialize an (on-stack) mmu_gather structure for page-table
- *	tear-down from @mm. The @fullmm argument is used when @mm is without
- *	users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+				unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
@@ -275,7 +271,8 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
 {
 	struct mmu_gather_batch *batch, *next;
 
@@ -398,6 +395,23 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+/* tlb_gather_mmu
+ *	Called to initialize an (on-stack) mmu_gather structure for page-table
+ *	tear-down from @mm. The @fullmm argument is used when @mm is without
+ *	users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	arch_tlb_gather_mmu(tlb, mm, start, end);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
+{
+	arch_tlb_finish_mmu(tlb, start, end);
+}
+
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
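Callers are untouched by this refactoring: they keep bracketing an unmap with the generic pair, roughly like this (a simplified sketch of the pattern used by mm/memory.c callers such as zap_page_range(), not a verbatim excerpt):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* now a thin wrapper over arch_tlb_gather_mmu() */
	/* ... unmap pages, batching TLB invalidations into &tlb ... */
	tlb_finish_mmu(&tlb, start, end);	/* wrapper over arch_tlb_finish_mmu() */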