vmalloc() heavy workloads in UML are extremely slow, due to flushing the
entire kernel VM space (flush_tlb_kernel_vm()) on the first segfault.

Implement flush_cache_vmap() to avoid that, and while at it also add
flush_cache_vunmap() since it's trivial.

This speeds up my vmalloc() heavy test of copying files out from
/sys/kernel/debug/gcov/ by 30x (from 30s to 1s).

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Acked-By: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
9 lines | 257 B | C
#ifndef __UM_ASM_CACHEFLUSH_H
#define __UM_ASM_CACHEFLUSH_H

#include <asm/tlbflush.h>
#define flush_cache_vmap flush_tlb_kernel_range
#define flush_cache_vunmap flush_tlb_kernel_range

#include <asm-generic/cacheflush.h>
#endif /* __UM_ASM_CACHEFLUSH_H */
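For context, a minimal sketch of where these hooks fire, assuming the usual flush_cache_vmap(start, end) contract used by the core vmalloc()/vmap() code; the function below is hypothetical, stands in for that core code, and is not part of this commit:

/*
 * Hypothetical caller, for illustration only: after new kernel page
 * tables are installed for a vmalloc()/vmap() range, the core code
 * calls flush_cache_vmap() on exactly that range.  With the macros
 * above, UML maps this to flush_tlb_kernel_range(), so only
 * [start, end) is flushed instead of the whole kernel VM space.
 */
#include <asm/cacheflush.h>

static void example_map_kernel_range(unsigned long start, unsigned long end)
{
	/* ... page tables for [start, end) are set up here ... */

	flush_cache_vmap(start, end);	/* flush just the new mapping */
}

On teardown, the core code invokes the symmetric flush_cache_vunmap() hook on the range before the mapping is removed, which is why the commit wires it to the same range-based flush.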