mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	Commit 682a3385e7 ("mm, page_alloc: inline the fast path of the
zonelist iterator") changed how next_zones_zonelist() is called, by
adding a static inline function to do the fast path.  This function
adds:
       if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
               return z;
       return __next_zones_zonelist(z, highest_zoneidx, nodes);
Where __next_zones_zonelist() is only called when nodes is not NULL or
zonelist_zone_idx(z) is greater than highest_zoneidx.
The original next_zone_zonelist() was converted to __next_zones_zonelist()
but it still maintained:
	if (likely(nodes == NULL))
Which is now actually very unlikely, as it is only called with nodes
equal to NULL when zonelist_zone_idx(z) is greater than highest_zoneidx.
Before this commit, this if had this statistic:
 correct incorrect  %        Function                  File              Line
 ------- ---------  -        --------                  ----              ----
  837895   446078  34 next_zones_zonelist            mmzone.c             63
After this commit, it has:
 correct incorrect  %        Function                  File              Line
 ------- ---------  -        --------                  ----              ----
      10   173840  99 __next_zones_zonelist          mmzone.c             63
Thus, the if statement is now much more unlikely than it ever was as a
likely.
Link: http://lkml.kernel.org/r/20170105200102.77989567@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
	
			
		
			
				
	
	
		
			114 lines
		
	
	
	
		
			2.4 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			114 lines
		
	
	
	
		
			2.4 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 | 
						|
 * linux/mm/mmzone.c
 | 
						|
 *
 | 
						|
 * management codes for pgdats, zones and page flags
 | 
						|
 */
 | 
						|
 | 
						|
 | 
						|
#include <linux/stddef.h>
 | 
						|
#include <linux/mm.h>
 | 
						|
#include <linux/mmzone.h>
 | 
						|
 | 
						|
struct pglist_data *first_online_pgdat(void)
 | 
						|
{
 | 
						|
	return NODE_DATA(first_online_node);
 | 
						|
}
 | 
						|
 | 
						|
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 | 
						|
{
 | 
						|
	int nid = next_online_node(pgdat->node_id);
 | 
						|
 | 
						|
	if (nid == MAX_NUMNODES)
 | 
						|
		return NULL;
 | 
						|
	return NODE_DATA(nid);
 | 
						|
}
 | 
						|
 | 
						|
/*
 | 
						|
 * next_zone - helper magic for for_each_zone()
 | 
						|
 */
 | 
						|
struct zone *next_zone(struct zone *zone)
 | 
						|
{
 | 
						|
	pg_data_t *pgdat = zone->zone_pgdat;
 | 
						|
 | 
						|
	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
 | 
						|
		zone++;
 | 
						|
	else {
 | 
						|
		pgdat = next_online_pgdat(pgdat);
 | 
						|
		if (pgdat)
 | 
						|
			zone = pgdat->node_zones;
 | 
						|
		else
 | 
						|
			zone = NULL;
 | 
						|
	}
 | 
						|
	return zone;
 | 
						|
}
 | 
						|
 | 
						|
/*
 * Test whether @zref's node is set in @nodes.  Without NUMA there is
 * only a single node, so every zoneref trivially matches.
 */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifndef CONFIG_NUMA
	return 1;
#else
	return node_isset(zonelist_node_idx(zref), *nodes);
#endif /* CONFIG_NUMA */
}
 | 
						|
 | 
						|
/* Returns the next zone at or below highest_zoneidx in a zonelist */
 | 
						|
struct zoneref *__next_zones_zonelist(struct zoneref *z,
 | 
						|
					enum zone_type highest_zoneidx,
 | 
						|
					nodemask_t *nodes)
 | 
						|
{
 | 
						|
	/*
 | 
						|
	 * Find the next suitable zone to use for the allocation.
 | 
						|
	 * Only filter based on nodemask if it's set
 | 
						|
	 */
 | 
						|
	if (unlikely(nodes == NULL))
 | 
						|
		while (zonelist_zone_idx(z) > highest_zoneidx)
 | 
						|
			z++;
 | 
						|
	else
 | 
						|
		while (zonelist_zone_idx(z) > highest_zoneidx ||
 | 
						|
				(z->zone && !zref_in_nodemask(z, nodes)))
 | 
						|
			z++;
 | 
						|
 | 
						|
	return z;
 | 
						|
}
 | 
						|
 | 
						|
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * Check that the memmap entry for @pfn really describes that pfn and
 * belongs to @zone; memory models with holes can have memmap entries
 * that do not correspond to valid memory.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return page_to_pfn(page) == pfn && page_zone(page) == zone;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
 | 
						|
 | 
						|
void lruvec_init(struct lruvec *lruvec)
 | 
						|
{
 | 
						|
	enum lru_list lru;
 | 
						|
 | 
						|
	memset(lruvec, 0, sizeof(struct lruvec));
 | 
						|
 | 
						|
	for_each_lru(lru)
 | 
						|
		INIT_LIST_HEAD(&lruvec->lists[lru]);
 | 
						|
}
 | 
						|
 | 
						|
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
 | 
						|
int page_cpupid_xchg_last(struct page *page, int cpupid)
 | 
						|
{
 | 
						|
	unsigned long old_flags, flags;
 | 
						|
	int last_cpupid;
 | 
						|
 | 
						|
	do {
 | 
						|
		old_flags = flags = page->flags;
 | 
						|
		last_cpupid = page_cpupid_last(page);
 | 
						|
 | 
						|
		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
 | 
						|
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 | 
						|
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
 | 
						|
 | 
						|
	return last_cpupid;
 | 
						|
}
 | 
						|
#endif
 |