mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	mm/tlbbatch: introduce arch_flush_tlb_batched_pending()
Currently we flush the whole mm in flush_tlb_batched_pending() to avoid a race between reclaim, which unmaps pages via a batched TLB flush, and mprotect/munmap/etc.  Other architectures like arm64 may only need a synchronization barrier (dsb) here rather than a full mm flush.  So add arch_flush_tlb_batched_pending() to allow an arch-specific implementation.  This intends no functional change on x86, which still performs a full mm flush.

Link: https://lkml.kernel.org/r/20230717131004.12662-4-yangyicong@huawei.com
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Barry Song <baohua@kernel.org>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Darren Hart <darren@os.amperecomputing.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: lipeifeng <lipeifeng@oppo.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xin Hao <xhao@linux.alibaba.com>
Cc: Zeng Tao <prime.zeng@hisilicon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit db6c1f6f23
parent f73419bb89

2 changed files with 6 additions and 1 deletion
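The commit message says arm64 may only need a synchronization barrier here. As a rough illustration of such an override (the real arm64 hook is added by a separate patch in this series and may differ), an arch-specific version could look like this:

	/*
	 * Illustrative sketch only, not part of this patch: a possible arm64
	 * definition of the new hook. The batched reclaim path has already
	 * issued its TLB invalidation instructions, so a DSB in the
	 * inner-shareable domain is enough to wait for them to complete;
	 * no full mm flush is needed.
	 */
	static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
	{
		dsb(ish);
	}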
				
			
arch/x86/include/asm/tlbflush.h:

@@ -284,6 +284,11 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
 
+static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+}
+
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 static inline bool pte_flags_need_flush(unsigned long oldflags,
mm/rmap.c:

@@ -717,7 +717,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
 	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 
 	if (pending != flushed) {
-		flush_tlb_mm(mm);
+		arch_flush_tlb_batched_pending(mm);
 		/*
 		 * If the new TLB flushing is pending during flushing, leave
 		 * mm->tlb_flush_batched as is, to avoid losing flushing.
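For context, after this change the whole of flush_tlb_batched_pending() in mm/rmap.c reads roughly as follows. Only the lines in the hunk above are taken from the diff; the atomic_read()/atomic_cmpxchg() lines around them are reconstructed from the surrounding kernel code and should be treated as an approximation:

	void flush_tlb_batched_pending(struct mm_struct *mm)
	{
		int batch = atomic_read(&mm->tlb_flush_batched);
		int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
		int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

		if (pending != flushed) {
			/* Arch hook: full mm flush on x86, possibly just a barrier elsewhere. */
			arch_flush_tlb_batched_pending(mm);
			/*
			 * If the new TLB flushing is pending during flushing, leave
			 * mm->tlb_flush_batched as is, to avoid losing flushing.
			 */
			atomic_cmpxchg(&mm->tlb_flush_batched, batch,
				       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
		}
	}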