mm: Make mem_dump_obj() handle vmalloc() memory

This commit adds vmalloc() support to mem_dump_obj().  Note that the
vmalloc_dump_obj() function combines the checking and dumping, in
contrast with the split between kmem_valid_obj() and kmem_dump_obj().
The reason for the difference is that the checking in the vmalloc()
case involves acquiring a global lock, and redundant acquisitions of
global locks should be avoided, even on not-so-fast paths.

Note that this change causes on-stack variables to be reported as
vmalloc() storage from kernel_clone() or similar, depending on the
degree of inlining that your compiler does.  This is likely more
helpful than the earlier "non-paged (local) memory".

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: <linux-mm@kvack.org>
Reported-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
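As a point of reference (not part of the commit itself), a minimal call-site sketch: mem_dump_obj() prints with pr_cont(), so the caller emits its own preamble first. The report_provenance() helper name below is purely hypothetical.

#include <linux/mm.h>		/* mem_dump_obj() */
#include <linux/printk.h>

/* Hypothetical debug helper: report where the memory behind @p came from. */
static void report_provenance(void *p)
{
	pr_info("object %p:", p);	/* no newline: mem_dump_obj() continues the line */
	mem_dump_obj(p);		/* e.g. " vmalloc allocated at <caller>+0x.../0x...\n" */
}

With this commit applied, a pointer into a vmalloc() region produces the " vmalloc allocated at %pS" line from vmalloc_dump_obj() instead of falling through to " non-paged (local) memory.".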
parent b70fa3b12f
commit 98f180837a

3 changed files with 26 additions and 6 deletions
include/linux/vmalloc.h

@@ -246,4 +246,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
 
+#ifdef CONFIG_MMU
+bool vmalloc_dump_obj(void *object);
+#else
+static inline bool vmalloc_dump_obj(void *object) { return false; }
+#endif
+
 #endif /* _LINUX_VMALLOC_H */

mm/util.c

@@ -996,18 +996,20 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
  */
 void mem_dump_obj(void *object)
 {
+	if (kmem_valid_obj(object)) {
+		kmem_dump_obj(object);
+		return;
+	}
+	if (vmalloc_dump_obj(object))
+		return;
 	if (!virt_addr_valid(object)) {
 		if (object == NULL)
 			pr_cont(" NULL pointer.\n");
 		else if (object == ZERO_SIZE_PTR)
 			pr_cont(" zero-size pointer.\n");
 		else
-			pr_cont(" non-paged (local) memory.\n");
+			pr_cont(" non-paged memory.\n");
 		return;
 	}
-	if (kmem_valid_obj(object)) {
-		kmem_dump_obj(object);
-		return;
-	}
-	pr_cont(" non-slab memory.\n");
+	pr_cont(" non-slab/vmalloc memory.\n");
 }

mm/vmalloc.c

@@ -3448,6 +3448,18 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 }
 #endif	/* CONFIG_SMP */
 
+bool vmalloc_dump_obj(void *object)
+{
+	struct vm_struct *vm;
+	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+
+	vm = find_vm_area(objp);
+	if (!vm)
+		return false;
+	pr_cont(" vmalloc allocated at %pS\n", vm->caller);
+	return true;
+}
+
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
 	__acquires(&vmap_purge_lock)
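For completeness, a hedged sketch of a throwaway test module (not part of the commit; the dump_obj_test_* names are invented) that exercises the new path, including the on-stack case called out in the commit log:

// SPDX-License-Identifier: GPL-2.0
/*
 * Hypothetical test module: vmalloc() a buffer and feed pointers to
 * mem_dump_obj() to observe the new " vmalloc allocated at %pS" output.
 */
#include <linux/init.h>
#include <linux/mm.h>		/* mem_dump_obj() */
#include <linux/module.h>
#include <linux/vmalloc.h>

static int __init dump_obj_test_init(void)
{
	int on_stack;
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return -ENOMEM;

	/* Expect something like " vmalloc allocated at dump_obj_test_init+0x...". */
	pr_info("vmalloc pointer:");
	mem_dump_obj(buf);

	/*
	 * With CONFIG_VMAP_STACK=y the task stack is vmalloc() memory too,
	 * so this is reported against the stack allocation (e.g. from
	 * kernel_clone(), depending on inlining), as the commit log notes.
	 */
	pr_info("on-stack pointer:");
	mem_dump_obj(&on_stack);

	vfree(buf);
	return 0;
}
module_init(dump_obj_test_init);

MODULE_LICENSE("GPL");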