mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	kmemleak: powerpc: skip scanning holes in the .bss section
Commit 2d4f567103 ("KVM: PPC: Introduce kvm_tmp framework") adds
kvm_tmp[] into the .bss section and then frees the rest of the unused
space back to the page allocator.
kernel_init
  kvm_guest_init
    kvm_free_tmp
      free_reserved_area
        free_unref_page
          free_unref_page_prepare
With DEBUG_PAGEALLOC=y, it will unmap those pages from the kernel.  As a
result, the kmemleak scan will trigger a panic when it scans the .bss
section with unmapped pages.
This patch creates dedicated kmemleak objects for the .data, .bss and
potentially .data..ro_after_init sections to allow partial freeing via
the kmemleak_free_part() in the powerpc kvm_free_tmp() function.
Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Qian Cai <cai@lca.pw>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Tested-by: Qian Cai <cai@lca.pw>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
This commit is contained in:
		
							parent
							
								
									5f074f3e19
								
							
						
					
					
						commit
						298a32b132
					
				
					 2 changed files with 18 additions and 5 deletions
				
			
		| 
						 | 
				
			
			@ -22,6 +22,7 @@
 | 
			
		|||
#include <linux/kvm_host.h>
 | 
			
		||||
#include <linux/init.h>
 | 
			
		||||
#include <linux/export.h>
 | 
			
		||||
#include <linux/kmemleak.h>
 | 
			
		||||
#include <linux/kvm_para.h>
 | 
			
		||||
#include <linux/slab.h>
 | 
			
		||||
#include <linux/of.h>
 | 
			
		||||
| 
						 | 
				
			
			@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 | 
			
		|||
 | 
			
		||||
/*
 * Free the unused tail of the kvm_tmp[] buffer (the part past
 * kvm_tmp_index) back to the page allocator.  kvm_tmp[] lives in the
 * .bss section, so kmemleak must be told about the resulting hole
 * before the pages are released; with DEBUG_PAGEALLOC=y the freed
 * pages are unmapped, and scanning them would panic.
 */
static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	/* Return the tail [kvm_tmp_index, ARRAY_SIZE(kvm_tmp)) to the
	 * page allocator; -1 keeps the existing poison value, NULL
	 * suppresses the "Freeing ... memory" banner name. */
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1529,11 +1529,6 @@ static void kmemleak_scan(void)
 | 
			
		|||
	}
 | 
			
		||||
	rcu_read_unlock();
 | 
			
		||||
 | 
			
		||||
	/* data/bss scanning */
 | 
			
		||||
	scan_large_block(_sdata, _edata);
 | 
			
		||||
	scan_large_block(__bss_start, __bss_stop);
 | 
			
		||||
	scan_large_block(__start_ro_after_init, __end_ro_after_init);
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_SMP
 | 
			
		||||
	/* per-cpu sections scanning */
 | 
			
		||||
	for_each_possible_cpu(i)
 | 
			
		||||
| 
						 | 
				
			
			@ -2071,6 +2066,17 @@ void __init kmemleak_init(void)
 | 
			
		|||
	}
 | 
			
		||||
	local_irq_restore(flags);
 | 
			
		||||
 | 
			
		||||
	/* register the data/bss sections */
 | 
			
		||||
	create_object((unsigned long)_sdata, _edata - _sdata,
 | 
			
		||||
		      KMEMLEAK_GREY, GFP_ATOMIC);
 | 
			
		||||
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
 | 
			
		||||
		      KMEMLEAK_GREY, GFP_ATOMIC);
 | 
			
		||||
	/* only register .data..ro_after_init if not within .data */
 | 
			
		||||
	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
 | 
			
		||||
		create_object((unsigned long)__start_ro_after_init,
 | 
			
		||||
			      __end_ro_after_init - __start_ro_after_init,
 | 
			
		||||
			      KMEMLEAK_GREY, GFP_ATOMIC);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * This is the point where tracking allocations is safe. Automatic
 | 
			
		||||
	 * scanning is started during the late initcall. Add the early logged
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue