	mm: add new mmgrab() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_count);/mmgrab\(\1\);/'
  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_count);/mmgrab\(\&\1\);/'

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-1-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 522b837c67
commit f1f1007644

40 changed files with 65 additions and 43 deletions
				
			
@@ -144,7 +144,7 @@ smp_callin(void)
 		alpha_mv.smp_callin();
 
 	/* All kernel threads share the same mm context.  */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	/* inform the notifiers about the new cpu */
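The first hunk above is representative: every conversion in this commit replaces an open-coded atomic_inc() on mm_count with the new mmgrab() helper, exactly as the sed expressions in the commit message describe. As a self-contained illustration of the counter being pinned, here is a small userspace C model of the two mm reference counts; the struct and helper names mirror the kernel's, but this is a sketch for illustration only, not kernel source:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the two counters on the kernel's struct mm_struct:
 * mm_count pins the mm_struct allocation itself ("lazy" users),
 * mm_users pins the address space (VMAs, page tables).
 */
struct mm_struct {
	atomic_int mm_count;
	atomic_int mm_users;
};

/* Modeled on the helper this commit introduces. */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_fetch_add(&mm->mm_count, 1);
}

/* Counterpart: drop the pin, freeing the mm when the last pin goes. */
static inline void mmdrop(struct mm_struct *mm)
{
	if (atomic_fetch_sub(&mm->mm_count, 1) == 1)
		free(mm);
}

int main(void)
{
	struct mm_struct *mm = malloc(sizeof(*mm));

	atomic_init(&mm->mm_count, 1);	/* initial reference */
	atomic_init(&mm->mm_users, 1);

	mmgrab(mm);		/* was: atomic_inc(&mm->mm_count); */
	printf("mm_count = %d\n", atomic_load(&mm->mm_count));	/* prints 2 */
	mmdrop(mm);		/* balances the mmgrab() */
	mmdrop(mm);		/* drops the initial reference and frees mm */
	return 0;
}

The gain is that call sites now say what they mean (pin the mm_struct) rather than how (bump a bare atomic counter), and the later patch the commit message mentions can hook the helper in a single place.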
@@ -140,7 +140,7 @@ void start_kernel_secondary(void)
 	setup_processor();
 
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
@@ -371,7 +371,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * reference and switch to it.
 	 */
 	cpu = smp_processor_id();
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 
 	/*
@@ -308,7 +308,7 @@ void secondary_start_kernel(void)
 
 	/* Attach the new idle task to the global mm. */
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 
 	preempt_disable();
@@ -162,7 +162,7 @@ void start_secondary(void)
 	);
 
 	/*  Set the memory struct  */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	cpu = smp_processor_id();
@@ -994,7 +994,7 @@ cpu_init (void)
 	 */
 	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
 					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 
@@ -403,7 +403,7 @@ void __init cpu_init (void)
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
 	/* Set up and load the per-CPU TSS and LDT */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	if (current->mm)
 		BUG();
@@ -345,7 +345,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * reference and switch to it.
 	 */
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	enter_lazy_tlb(mm, current);
@@ -2232,7 +2232,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	if (!cpu_data[cpu].asid_cache)
 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
@@ -589,7 +589,7 @@ static void __init smp_cpu_init(void)
 	}
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 
@@ -279,7 +279,7 @@ smp_cpu_init(int cpunum)
 	set_cpu_online(cpunum, true);
 
 	/* Initialise the idle task for this CPU */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
@@ -707,7 +707,7 @@ void start_secondary(void *unused)
 	unsigned int cpu = smp_processor_id();
 	int i, base;
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	smp_store_cpu_info(cpu);
@@ -73,7 +73,7 @@ void cpu_init(void)
 	get_cpu_id(id);
 	if (machine_has_cpu_mhz)
 		update_cpu_mhz(NULL);
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
@@ -336,7 +336,7 @@ void __init trap_init(void)
 	set_except_vector(18, handle_dbe);
 	flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	cpu_cache_init();
 }
@@ -178,7 +178,7 @@ asmlinkage void start_secondary(void)
 	struct mm_struct *mm = &init_mm;
 
 	enable_mmu();
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
 #ifdef CONFIG_MMU
@@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg)
 			     : "memory" /* paranoid */);
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
@@ -122,7 +122,7 @@ void smp_callin(void)
 	current_thread_info()->new_child = 0;
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	/* inform the notifiers about the new cpu */
@@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg)
 	show_leds(cpuid);
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	local_ops->cache_all();
@@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg)
 			     : "memory" /* paranoid */);
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
@@ -448,7 +448,7 @@ void trap_init(void)
 		thread_info_offsets_are_bolixed_pete();
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	/* NOTE: Other cpus have this done as they are started
@@ -2837,6 +2837,6 @@ void __init trap_init(void)
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 }
@@ -160,7 +160,7 @@ static void start_secondary(void)
 	__this_cpu_write(current_asid, min_asid);
 
 	/* Set up this thread as another owner of the init_mm */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	if (current->mm)
 		BUG();
@@ -1510,7 +1510,7 @@ void cpu_init(void)
 	for (i = 0; i <= IO_BITMAP_LONGS; i++)
 		t->io_bitmap[i] = ~0UL;
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	me->active_mm = &init_mm;
 	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
@@ -1561,7 +1561,7 @@ void cpu_init(void)
 	/*
 	 * Set up and load the per-CPU TSS and LDT
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	curr->active_mm = &init_mm;
 	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
@@ -136,7 +136,7 @@ void secondary_start_kernel(void)
 	/* All kernel threads share the same mm context. */
 
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	enter_lazy_tlb(mm, current);
@@ -262,7 +262,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 	 * and because the mmu_notifier_unregister function also drop
 	 * mm_count we need to take an extra count here.
 	 */
-	atomic_inc(&p->mm->mm_count);
+	mmgrab(p->mm);
 	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
 	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
 }
@@ -334,7 +334,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 		mm->i915 = to_i915(obj->base.dev);
 
 		mm->mm = current->mm;
-		atomic_inc(&current->mm->mm_count);
+		mmgrab(current->mm);
 
 		mm->mn = NULL;
 
@@ -185,7 +185,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 	if (fd) {
 		fd->rec_cpu_num = -1; /* no cpu affinity by default */
 		fd->mm = current->mm;
-		atomic_inc(&fd->mm->mm_count);
+		mmgrab(fd->mm);
 		fp->private_data = fd;
 	} else {
 		fp->private_data = NULL;
@@ -766,7 +766,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
 
 		if (!IS_ERR_OR_NULL(mm)) {
 			/* ensure this mm_struct can't be freed */
-			atomic_inc(&mm->mm_count);
+			mmgrab(mm);
 			/* but do not pin its memory */
 			mmput(mm);
 		}
@@ -1064,7 +1064,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 		if (p) {
 			if (atomic_read(&p->mm->mm_users) > 1) {
 				mm = p->mm;
-				atomic_inc(&mm->mm_count);
+				mmgrab(mm);
 			}
 			task_unlock(p);
 		}
@@ -1847,7 +1847,7 @@ static struct file *userfaultfd_file_create(int flags)
 	ctx->released = false;
 	ctx->mm = current->mm;
 	/* prevent the mm struct to be freed */
-	atomic_inc(&ctx->mm->mm_count);
+	mmgrab(ctx->mm);
 
 	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
 				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
@@ -2904,6 +2904,28 @@ static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
  */
 extern struct mm_struct * mm_alloc(void);
 
+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_count);
+}
+
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
 static inline void mmdrop(struct mm_struct *mm)
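The kerneldoc above draws the key distinction: mmgrab() pins only the mm_struct allocation (mm_count), not the address space (mm_users). A minimal kernel-style usage sketch of the intended pairing follows; the function is hypothetical, for illustration only, and not part of this commit:

/* Hypothetical consumer, illustrating the mmgrab()/mmdrop() pairing. */
static void pin_mm_example(struct mm_struct *mm)
{
	mmgrab(mm);			/* mm_struct cannot be freed now,
					 * even if the owning task exits */

	/* ... time passes; the address space may be gone by now ... */

	if (mmget_not_zero(mm)) {	/* take an mm_users reference */
		/* VMAs and page tables are guaranteed valid here */
		mmput(mm);		/* drop the mm_users reference */
	}

	mmdrop(mm);			/* release the pin from mmgrab() */
}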
@@ -539,7 +539,7 @@ static void exit_mm(void)
 		__set_current_state(TASK_RUNNING);
 		down_read(&mm->mmap_sem);
 	}
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
 	task_lock(current);
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)
 
 static inline void futex_get_mm(union futex_key *key)
 {
-	atomic_inc(&key->private.mm->mm_count);
+	mmgrab(key->private.mm);
 	/*
 	 * Ensure futex_get_mm() implies a full barrier such that
 	 * get_futex_key() implies a full barrier. This is relied upon
@@ -2847,7 +2847,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
 	if (!mm) {
 		next->active_mm = oldmm;
-		atomic_inc(&oldmm->mm_count);
+		mmgrab(oldmm);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm_irqs_off(oldmm, mm, next);
@@ -6098,7 +6098,7 @@ void __init sched_init(void)
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	enter_lazy_tlb(&init_mm, current);
 
 	/*
@@ -420,7 +420,7 @@ int __khugepaged_enter(struct mm_struct *mm)
 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 	spin_unlock(&khugepaged_mm_lock);
 
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	if (wakeup)
 		wake_up_interruptible(&khugepaged_wait);
 
mm/ksm.c:
@@ -1854,7 +1854,7 @@ int __ksm_enter(struct mm_struct *mm)
 	spin_unlock(&ksm_mmlist_lock);
 
 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 
 	if (needs_wakeup)
 		wake_up_interruptible(&ksm_thread_wait);
@@ -25,7 +25,7 @@ void use_mm(struct mm_struct *mm)
 	task_lock(tsk);
 	active_mm = tsk->active_mm;
 	if (active_mm != mm) {
-		atomic_inc(&mm->mm_count);
+		mmgrab(mm);
 		tsk->active_mm = mm;
 	}
 	tsk->mm = mm;
@@ -275,7 +275,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 		mm->mmu_notifier_mm = mmu_notifier_mm;
 		mmu_notifier_mm = NULL;
 	}
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 
 	/*
 	 * Serialize the update against mmu_notifier_unregister. A
@@ -653,7 +653,7 @@ static void mark_oom_victim(struct task_struct *tsk)
 
 	/* oom_mm is bound to the signal struct life time. */
 	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
-		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		mmgrab(tsk->signal->oom_mm);
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
@@ -870,7 +870,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 
 	/* Get a reference to safely compare mm after task_unlock(victim) */
 	mm = victim->mm;
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	/*
 	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
 	 * the OOM victim from depleting the memory reserves from the user
@@ -611,7 +611,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&kvm->mmu_lock);
-	atomic_inc(&current->mm->mm_count);
+	mmgrab(current->mm);
 	kvm->mm = current->mm;
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);