	sched/core: Add switch_mm_irqs_off() and use it in the scheduler
By default, this is the same thing as switch_mm(). x86 will override it
as an optimization.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/df401df47bdd6be3e389c6f1e3f5310d70e81b2c.1461688545.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 8efd755ac2
commit f98db6013c

2 changed files with 10 additions and 3 deletions
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -1,9 +1,16 @@
 #ifndef _LINUX_MMU_CONTEXT_H
 #define _LINUX_MMU_CONTEXT_H
 
+#include <asm/mmu_context.h>
+
 struct mm_struct;
 
 void use_mm(struct mm_struct *mm);
 void unuse_mm(struct mm_struct *mm);
 
+/* Architectures that care about IRQ state in switch_mm can override this. */
+#ifndef switch_mm_irqs_off
+# define switch_mm_irqs_off switch_mm
+#endif
+
 #endif

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -33,7 +33,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <linux/interrupt.h>
 #include <linux/capability.h>
 #include <linux/completion.h>
@@ -2733,7 +2733,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
-		switch_mm(oldmm, mm, next);
+		switch_mm_irqs_off(oldmm, mm, next);
 
 	if (!prev->mm) {
 		prev->active_mm = NULL;
@@ -5274,7 +5274,7 @@ void idle_task_exit(void)
 	BUG_ON(cpu_online(smp_processor_id()));
 
 	if (mm != &init_mm) {
-		switch_mm(mm, &init_mm, current);
+		switch_mm_irqs_off(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
 	mmdrop(mm);
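
For context, the override mechanism works at the preprocessor level: an architecture's <asm/mmu_context.h> is included before the generic fallback is tested, so if the arch header defines switch_mm_irqs_off, the "# define switch_mm_irqs_off switch_mm" default is skipped. Below is a minimal sketch of what such an arch-side override could look like; the function prototype and the self-referential #define are illustrative assumptions modeled on this commit's pattern, not code from this patch.

/*
 * Hypothetical sketch of an architecture override (e.g. something an
 * arch could place in its asm/mmu_context.h); not part of this commit.
 */
struct mm_struct;
struct task_struct;

/* Arch-provided variant that may rely on IRQs already being disabled. */
extern void switch_mm_irqs_off(struct mm_struct *prev,
			       struct mm_struct *next,
			       struct task_struct *tsk);

/*
 * Defining the macro to itself makes the #ifndef in
 * <linux/mmu_context.h> a no-op, so the generic
 * "switch_mm_irqs_off == switch_mm" fallback is not used.
 */
#define switch_mm_irqs_off switch_mm_irqs_off

Architectures that do nothing get the old behavior unchanged, since the generic header simply aliases switch_mm_irqs_off to switch_mm for them.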