oom: change all_unreclaimable zone member to flags

Convert the int all_unreclaimable member of struct zone to unsigned long
flags.  This field can now hold several different zone flags, such as
all_unreclaimable and reclaim_in_progress; the latter is removed as an
atomic_t member and converted to a per-zone flag.

Flags are set and cleared as follows:

	zone_set_flag(struct zone *zone, zone_flags_t flag)
	zone_clear_flag(struct zone *zone, zone_flags_t flag)

Define the first two zone flags, ZONE_ALL_UNRECLAIMABLE and
ZONE_RECLAIM_LOCKED, which have the same semantics as the old
zone->all_unreclaimable and zone->reclaim_in_progress, respectively, and
convert all current users that set or clear either flag to the new
interface.

Helper functions are defined to test the flags:

	int zone_is_all_unreclaimable(const struct zone *zone)
	int zone_is_reclaim_locked(const struct zone *zone)

All flag operations are of the atomic variety because there are existing
readers that do not take zone->lock.

[akpm@linux-foundation.org: add needed include]
Cc: Andrea Arcangeli <andrea@suse.de>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e815af95f9
parent 70e24bdf6d
4 changed files with 43 additions and 21 deletions
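For illustration only (not part of the patch): the helpers added to
include/linux/mmzone.h below are thin wrappers around the kernel's atomic
bitops, and the commit message notes the atomicity matters because some
readers test the flags without holding zone->lock.  The following minimal
userspace sketch mimics that interface, with GCC/Clang __atomic builtins
standing in for the kernel's set_bit()/clear_bit()/test_bit(); the type and
function names are copied from the patch, everything else is assumption.

	/* Userspace sketch of the zone flag interface; build with: cc -O2 sketch.c */
	#include <stdio.h>

	typedef enum {
		ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
		ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	} zone_flags_t;

	struct zone {
		unsigned long flags;		/* zone flags */
	};

	static void zone_set_flag(struct zone *zone, zone_flags_t flag)
	{
		/* atomic read-modify-write, standing in for set_bit() */
		__atomic_fetch_or(&zone->flags, 1UL << flag, __ATOMIC_RELAXED);
	}

	static void zone_clear_flag(struct zone *zone, zone_flags_t flag)
	{
		/* atomic read-modify-write, standing in for clear_bit() */
		__atomic_fetch_and(&zone->flags, ~(1UL << flag), __ATOMIC_RELAXED);
	}

	static int zone_is_reclaim_locked(const struct zone *zone)
	{
		/* lockless read, standing in for test_bit(): no zone->lock taken */
		return (__atomic_load_n(&zone->flags, __ATOMIC_RELAXED)
			>> ZONE_RECLAIM_LOCKED) & 1;
	}

	int main(void)
	{
		struct zone z = { .flags = 0 };

		zone_set_flag(&z, ZONE_RECLAIM_LOCKED);
		printf("reclaim locked: %d\n", zone_is_reclaim_locked(&z));
		zone_clear_flag(&z, ZONE_RECLAIM_LOCKED);
		printf("reclaim locked: %d\n", zone_is_reclaim_locked(&z));
		return 0;
	}

Note that ZONE_RECLAIM_LOCKED is a single bit, whereas the old
reclaim_in_progress was a counter; this is why zone_reclaim()'s
atomic_read() > 0 test collapses to zone_is_reclaim_locked() in the
mm/vmscan.c hunks below.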
				
			
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/wait.h>
+#include <linux/bitops.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
 #include <linux/numa.h>
@@ -262,10 +263,7 @@ struct zone {
 	unsigned long		nr_scan_active;
 	unsigned long		nr_scan_inactive;
 	unsigned long		pages_scanned;	   /* since last reclaim */
-	int			all_unreclaimable; /* All pages pinned */
-
-	/* A count of how many reclaimers are scanning this zone */
-	atomic_t		reclaim_in_progress;
+	unsigned long		flags;		   /* zone flags, see below */
 
 	/* Zone statistics */
 	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -343,6 +341,29 @@ struct zone {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
+typedef enum {
+	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
+	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
+} zone_flags_t;
+
+static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
+{
+	set_bit(flag, &zone->flags);
+}
+static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
+{
+	clear_bit(flag, &zone->flags);
+}
+
+static inline int zone_is_all_unreclaimable(const struct zone *zone)
+{
+	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
+}
+static inline int zone_is_reclaim_locked(const struct zone *zone)
+{
+	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -490,7 +490,7 @@ static void free_pages_bulk(struct zone *zone, int count,
 					struct list_head *list, int order)
 {
 	spin_lock(&zone->lock);
-	zone->all_unreclaimable = 0;
+	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 	while (count--) {
 		struct page *page;
@@ -507,7 +507,7 @@ static void free_pages_bulk(struct zone *zone, int count,
 static void free_one_page(struct zone *zone, struct page *page, int order)
 {
 	spin_lock(&zone->lock);
-	zone->all_unreclaimable = 0;
+	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 	__free_one_page(page, zone, order);
 	spin_unlock(&zone->lock);
@@ -1851,7 +1851,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_INACTIVE)),
 			K(zone->present_pages),
 			zone->pages_scanned,
-			(zone->all_unreclaimable ? "yes" : "no")
+			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -3372,7 +3372,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		zone->nr_scan_active = 0;
 		zone->nr_scan_inactive = 0;
 		zap_zone_vm_stats(zone);
-		atomic_set(&zone->reclaim_in_progress, 0);
+		zone->flags = 0;
 		if (!size)
 			continue;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1108,7 +1108,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
 
-	atomic_inc(&zone->reclaim_in_progress);
+	zone_set_flag(zone, ZONE_RECLAIM_LOCKED);
 
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
@@ -1149,7 +1149,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 
 	throttle_vm_writeout(sc->gfp_mask);
 
-	atomic_dec(&zone->reclaim_in_progress);
+	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 	return nr_reclaimed;
 }
 
@@ -1187,7 +1187,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
 
 		note_zone_scanning_priority(zone, priority);
 
-		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+		if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
 		sc->all_unreclaimable = 0;
@@ -1368,7 +1368,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone_is_all_unreclaimable(zone) &&
+			    priority != DEF_PRIORITY)
 				continue;
 
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1403,7 +1404,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone_is_all_unreclaimable(zone) &&
+					priority != DEF_PRIORITY)
 				continue;
 
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1424,12 +1426,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 						lru_pages);
 			nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
-			if (zone->all_unreclaimable)
+			if (zone_is_all_unreclaimable(zone))
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
 				(zone_page_state(zone, NR_ACTIVE)
 				+ zone_page_state(zone, NR_INACTIVE)) * 6)
-					zone->all_unreclaimable = 1;
+					zone_set_flag(zone,
+						      ZONE_ALL_UNRECLAIMABLE);
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -1595,7 +1598,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
 		/* For pass = 0 we don't shrink the active list */
@@ -1919,10 +1922,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * not have reclaimable pages and if we should not delay the allocation
 	 * then do not scan.
 	 */
-	if (!(gfp_mask & __GFP_WAIT) ||
-		zone->all_unreclaimable ||
-		atomic_read(&zone->reclaim_in_progress) > 0 ||
-		(current->flags & PF_MEMALLOC))
+	if (!(gfp_mask & __GFP_WAIT) || zone_is_all_unreclaimable(zone) ||
+		zone_is_reclaim_locked(zone) || (current->flags & PF_MEMALLOC))
 			return 0;
 
 	/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -704,7 +704,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n  all_unreclaimable: %u"
 		   "\n  prev_priority:     %i"
 		   "\n  start_pfn:         %lu",
-		   zone->all_unreclaimable,
+		   zone_is_all_unreclaimable(zone),
 		   zone->prev_priority,
 		   zone->zone_start_pfn);
 	seq_putc(m, '\n');