	x86/mm/tlb: Restructure switch_mm_irqs_off()
Move some code that will be needed for the lazy -> !lazy state transition
when a lazy TLB CPU has gotten out of date.

No functional changes, since the if (real_prev == next) branch always
returns.

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Rik van Riel <riel@surriel.com>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: efault@gmx.de
Cc: kernel-team@fb.com
Link: http://lkml.kernel.org/r/20180716190337.26133-4-riel@surriel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:

parent 2ff6ddf19c
commit 61d0beb579

1 changed file with 30 additions and 30 deletions
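The restructuring is easiest to see as control flow: the need_flush/new_asid declarations move to function scope, so the flush-or-not CR3 load and the per-CPU state updates run after the if/else instead of inside the else branch, and the prev == next branch still returns early. The stand-in program below only mirrors that shape; every type and helper in it (struct mm, choose_new_asid, load_new_mm_cr3, the loaded_mm globals, the modulo-6 ASID policy) is a simplified stub invented for illustration, not the kernel's actual definitions.

/*
 * Stand-in sketch, not kernel code: it only mirrors the post-patch shape
 * of switch_mm_irqs_off(). All types and helpers here are simplified stubs.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm { unsigned long ctx_id; };

static struct mm *loaded_mm;            /* stub for cpu_tlbstate.loaded_mm */
static unsigned short loaded_mm_asid;   /* stub for cpu_tlbstate.loaded_mm_asid */

/* Stub ASID policy; the real choose_new_asid() scans per-CPU context slots. */
static void choose_new_asid(struct mm *next, unsigned short *new_asid, bool *need_flush)
{
        *new_asid = (unsigned short)(next->ctx_id % 6);
        *need_flush = true;
}

/* Stub for load_new_mm_cr3(): just report what a CR3 write would do. */
static void load_new_mm_cr3(struct mm *next, unsigned short asid, bool flush)
{
        printf("ctx %lu -> asid %u, flush=%d\n", next->ctx_id, asid, flush);
}

static void switch_mm_demo(struct mm *prev, struct mm *next)
{
        struct mm *real_prev = loaded_mm;
        /* After the patch, these declarations live at function scope ... */
        bool need_flush;
        unsigned short new_asid;

        (void)prev;     /* like the real function, prev itself is unused here */

        if (real_prev == next) {
                return;         /* ... the prev == next branch still returns early ... */
        } else {
                /* ... the else branch only picks the new ASID ... */
                choose_new_asid(next, &new_asid, &need_flush);
        }

        /* ... and the flush/no-flush CR3 load plus state updates are shared below. */
        if (need_flush)
                load_new_mm_cr3(next, new_asid, true);
        else
                load_new_mm_cr3(next, new_asid, false);

        loaded_mm = next;
        loaded_mm_asid = new_asid;
}

int main(void)
{
        struct mm a = { .ctx_id = 1 }, b = { .ctx_id = 2 };

        loaded_mm = &a;
        switch_mm_demo(&a, &b);   /* real switch: runs the shared tail */
        switch_mm_demo(&b, &b);   /* prev == next: early return, prints nothing */
        return 0;
}

With the declarations hoisted and the tail shared, a later patch can reach the flush path from a new entry point (the lazy -> !lazy transition the commit message mentions) without duplicating that code.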
				
			
arch/x86/mm/tlb.c

@@ -187,6 +187,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
         unsigned cpu = smp_processor_id();
         u64 next_tlb_gen;
+        bool need_flush;
+        u16 new_asid;
 
         /*
          * NB: The scheduler will call us with prev == next when switching
@@ -252,8 +254,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
                 return;
         } else {
-                u16 new_asid;
-                bool need_flush;
                 u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
                 /*
@@ -297,41 +297,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
                 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+        }
 
-                if (need_flush) {
-                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
-                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-                        load_new_mm_cr3(next->pgd, new_asid, true);
+        if (need_flush) {
+                this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+                this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+                load_new_mm_cr3(next->pgd, new_asid, true);
 
-                        /*
-                         * NB: This gets called via leave_mm() in the idle path
-                         * where RCU functions differently.  Tracing normally
-                         * uses RCU, so we need to use the _rcuidle variant.
-                         *
-                         * (There is no good reason for this.  The idle code should
-                         *  be rearranged to call this before rcu_idle_enter().)
-                         */
-                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-                } else {
-                        /* The new ASID is already up to date. */
-                        load_new_mm_cr3(next->pgd, new_asid, false);
+                /*
+                 * NB: This gets called via leave_mm() in the idle path
+                 * where RCU functions differently.  Tracing normally
+                 * uses RCU, so we need to use the _rcuidle variant.
+                 *
+                 * (There is no good reason for this.  The idle code should
+                 *  be rearranged to call this before rcu_idle_enter().)
+                 */
+                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+        } else {
+                /* The new ASID is already up to date. */
+                load_new_mm_cr3(next->pgd, new_asid, false);
 
-                        /* See above wrt _rcuidle. */
-                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
-                }
+                /* See above wrt _rcuidle. */
+                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+        }
 
-                /*
-                 * Record last user mm's context id, so we can avoid
-                 * flushing branch buffer with IBPB if we switch back
-                 * to the same user.
-                 */
-                if (next != &init_mm)
-                        this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+        /*
+         * Record last user mm's context id, so we can avoid
+         * flushing branch buffer with IBPB if we switch back
+         * to the same user.
+         */
+        if (next != &init_mm)
+                this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
 
-                this_cpu_write(cpu_tlbstate.loaded_mm, next);
-                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
-        }
+        this_cpu_write(cpu_tlbstate.loaded_mm, next);
+        this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 
         load_mm_cr4(next);
         switch_ldt(real_prev, next);
 }
				
			
			
 | 
			