mm/memblock.c: skip kmemleak for kasan_init()
Kmemleak does not play well with KASAN (tested on both HPE Apollo 70 and
Huawei TaiShan 2280 aarch64 servers).

After calling start_kernel()->setup_arch()->kasan_init(), the kmemleak early
log buffer went from something like 280 entries to 260000, which caused
kmemleak to be disabled and the crash dump memory reservation to fail.  The
multitude of kmemleak_alloc() calls comes from the nested loops KASAN runs
while setting up the full memory mappings, so let the
memblock_alloc_internal() calls that come from kasan_init() skip the early
kmemleak registration, given that those early KASAN memory mappings should
not reference any other memory.  Hence, no kmemleak false positives.

The offending call chain is shown below ([1]-[5] mark the nested loops; a
minimal sketch of the resulting allocation flow follows the chain):
kasan_init
  kasan_map_populate [1]
    kasan_pgd_populate [2]
      kasan_pud_populate [3]
        kasan_pmd_populate [4]
          kasan_pte_populate [5]
            kasan_alloc_zeroed_page
              memblock_alloc_try_nid
                memblock_alloc_internal
                  kmemleak_alloc
[1] for_each_memblock(memory, reg)
[2] while (pgdp++, addr = next, addr != end)
[3] while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)))
[4] while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)))
[5] while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)))
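To make that flow concrete, here is a minimal userspace sketch (illustrative
C only, not kernel code: the model_*() helpers, the kmemleak_tracked counter
and the use of malloc() as a stand-in are invented for this example).  It
models how MEMBLOCK_ALLOC_KASAN is still honored as "allocate below
memblock.current_limit" when a range is searched, while suppressing the
kmemleak registration afterwards:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef uint64_t phys_addr_t;

	/* Sentinel values for @end / @max_addr, same values as the patch. */
	#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
	#define MEMBLOCK_ALLOC_ACCESSIBLE	0
	#define MEMBLOCK_ALLOC_KASAN		1

	static phys_addr_t current_limit = 1ULL << 32; /* memblock.current_limit stand-in */
	static unsigned long kmemleak_tracked;         /* kmemleak object list stand-in */

	/* Models the "pump up @end" step of memblock_find_in_range_node():
	 * both sentinels mean "search below the current limit". */
	static phys_addr_t model_find_in_range(phys_addr_t end)
	{
		if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
		    end == MEMBLOCK_ALLOC_KASAN)
			end = current_limit;
		return end;
	}

	/* Models memblock_alloc_internal(): allocate first, then decide
	 * whether kmemleak should hear about the block at all. */
	static void *model_alloc(size_t size, phys_addr_t max_addr)
	{
		phys_addr_t end = model_find_in_range(max_addr);
		void *ptr = malloc(size);  /* stand-in for carving a range below @end */

		printf("allocated %zu bytes below %#llx\n",
		       size, (unsigned long long)end);

		/* Skip kmemleak for kasan_init() due to high volume. */
		if (max_addr != MEMBLOCK_ALLOC_KASAN)
			kmemleak_tracked++;  /* stand-in for kmemleak_alloc(ptr, size, 0, 0) */

		return ptr;
	}

(A short driver exercising this sketch follows the diff below.)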
Link: http://lkml.kernel.org/r/1543442925-17794-1-git-send-email-cai@gmx.us
Signed-off-by: Qian Cai <cai@gmx.us>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
commit fed84c7852
parent 65c7878413

3 changed files with 13 additions and 9 deletions
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -39,7 +39,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 {
 	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
 					      __pa(MAX_DMA_ADDRESS),
-					      MEMBLOCK_ALLOC_ACCESSIBLE, node);
+					      MEMBLOCK_ALLOC_KASAN, node);
 	return __pa(p);
 }
 
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -319,6 +319,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 /* Flags for memblock allocation APIs */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
+#define MEMBLOCK_ALLOC_KASAN		1
 
 /* We are using top down, so it is safe to use 0 here */
 #define MEMBLOCK_LOW_LIMIT 0
diff --git a/mm/memblock.c b/mm/memblock.c
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -262,7 +262,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 	phys_addr_t kernel_end, ret;
 
 	/* pump up @end */
-	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
+	    end == MEMBLOCK_ALLOC_KASAN)
 		end = memblock.current_limit;
 
 	/* avoid allocating the first page */
@@ -1419,11 +1420,13 @@ static void * __init memblock_alloc_internal(
 done:
 	ptr = phys_to_virt(alloc);
 
-	/*
-	 * The min_count is set to 0 so that bootmem allocated blocks
-	 * are never reported as leaks. This is because many of these blocks
-	 * are only referred via the physical address which is not
-	 * looked up by kmemleak.
-	 */
-	kmemleak_alloc(ptr, size, 0, 0);
+	/* Skip kmemleak for kasan_init() due to high volume. */
+	if (max_addr != MEMBLOCK_ALLOC_KASAN)
+		/*
+		 * The min_count is set to 0 so that bootmem allocated
+		 * blocks are never reported as leaks. This is because many
+		 * of these blocks are only referred via the physical
+		 * address which is not looked up by kmemleak.
+		 */
+		kmemleak_alloc(ptr, size, 0, 0);
 
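And a hypothetical driver for the sketch above (same caveats: illustrative
userspace C, not kernel code):

	int main(void)
	{
		/* An ordinary early allocation: kmemleak is told about it. */
		model_alloc(64, MEMBLOCK_ALLOC_ACCESSIBLE);

		/* A kasan_init()-style allocation: same range behaviour, no record. */
		model_alloc(64, MEMBLOCK_ALLOC_KASAN);

		printf("tracked objects: %lu\n", kmemleak_tracked);  /* prints 1 */
		return 0;
	}

One note on the sentinel choice (my reading, not stated in the patch): the
physical address 1 is safe to reuse as MEMBLOCK_ALLOC_KASAN because
memblock_find_in_range_node() already refuses to hand out the first page
(see the "avoid allocating the first page" context above), so a genuine
upper bound of 1 could never satisfy an allocation anyway.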