	mm: don't wait on congested zones in balance_pgdat()
From: Zlatko Calusic <zlatko.calusic@iskon.hr>
Commit 92df3a723f ("mm: vmscan: throttle reclaim if encountering too
many dirty pages under writeback") introduced waiting on congested zones
based on a sane algorithm in shrink_inactive_list().
This means there is no longer any need for the throttling and
additional heuristics in balance_pgdat().  So, let's remove them and
tidy up the code.
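
For reference, the throttling that now covers this case lives in
shrink_inactive_list(); a minimal sketch of that check (paraphrased
from the 3.8-era mm/vmscan.c, not quoted verbatim) looks like:

	/*
	 * If too many of the pages isolated from the inactive list are
	 * still under writeback, reclaim is outpacing the backing
	 * device, so throttle here instead of in balance_pgdat().  The
	 * threshold scales with reclaim priority as a simple backoff.
	 */
	if (nr_writeback && nr_writeback >=
			(nr_taken >> (DEF_PRIORITY - sc->priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);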
Signed-off-by: Zlatko Calusic <zlatko.calusic@iskon.hr>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
commit 258401a60c
parent 4db0e950c5

 3 changed files with 1 addition and 30 deletions
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -36,7 +36,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 		PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
-		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
 #ifdef CONFIG_NUMA_BALANCING
 		NUMA_PTE_UPDATES,
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2617,7 +2617,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 							int *classzone_idx)
 {
 	bool pgdat_is_balanced = false;
-	struct zone *unbalanced_zone;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 	unsigned long total_scanned;
@@ -2648,9 +2647,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
 	do {
 		unsigned long lru_pages = 0;
-		int has_under_min_watermark_zone = 0;
-
-		unbalanced_zone = NULL;
 
 		/*
 		 * Scan in the highmem->dma direction for the highest
@@ -2790,17 +2786,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				continue;
 			}
 
-			if (!zone_balanced(zone, testorder, 0, end_zone)) {
-				unbalanced_zone = zone;
-				/*
-				 * We are still under min water mark.  This
-				 * means that we have a GFP_ATOMIC allocation
-				 * failure risk. Hurry up!
-				 */
-				if (!zone_watermark_ok_safe(zone, order,
-					    min_wmark_pages(zone), end_zone, 0))
-					has_under_min_watermark_zone = 1;
-			} else {
+			if (zone_balanced(zone, testorder, 0, end_zone))
 				/*
 				 * If a zone reaches its high watermark,
 				 * consider it to be no longer congested. It's
@@ -2809,8 +2795,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * speculatively avoid congestion waits
 				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
-			}
-
 		}
 
 		/*
@@ -2827,17 +2811,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			break;		/* kswapd: all done */
 		}
 
-		/*
-		 * OK, kswapd is getting into trouble.  Take a nap, then take
-		 * another pass across the zones.
-		 */
-		if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
-			if (has_under_min_watermark_zone)
-				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
-			else if (unbalanced_zone)
-				wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
-		}
-
 		/*
 		 * We do this so kswapd doesn't build up large priorities for
 		 * example when it is freeing in parallel with allocators. It
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -770,7 +770,6 @@ const char * const vmstat_text[] = {
 	"kswapd_inodesteal",
 	"kswapd_low_wmark_hit_quickly",
 	"kswapd_high_wmark_hit_quickly",
-	"kswapd_skip_congestion_wait",
 	"pageoutrun",
 	"allocstall",
 