mirror of https://github.com/torvalds/linux.git — synced 2025-10-31 08:38:45 +02:00

commit 53fbef56e0
Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct folio.  This is a step towards that by introducing a type for the 'flags' word of all three structures.  This gives us a certain amount of type safety by establishing that some of these unsigned longs are different from other unsigned longs in that they contain things like node ID, section number and zone number in the upper bits.  That lets us have functions that can be easily called by anyone who has a slab, folio or page (but not easily by anyone else) to get the node or zone.

There are going to be some unusual merge problems with this, as some odd bits of the kernel decide they want to print out the flags value or something similar by writing page->flags, and they'll now need to write page->flags.f instead.  That's most of the churn here.  Maybe we should be removing these things from the debug output?

This patch (of 11):

Wrap the unsigned long flags in a typedef.  In upcoming patches, this will provide a strong hint that you can't just pass a random unsigned long to functions which take this as an argument.

[willy@infradead.org: s/flags/flags.f/ in several architectures]
  Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
  Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
  Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
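To make the described pattern concrete, here is a minimal, self-contained C sketch of the idea: the flags word is wrapped in a single-member typedef, callers that used to write page->flags now write page->flags.f, and helpers can pull fields such as the node ID out of the upper bits.  The simplified struct page, the MY_NODES_SHIFT constant and the sketch_page_nid() helper below are illustrative assumptions for this sketch, not the kernel's actual definitions.

/*
 * Illustrative sketch only; the real memdesc_flags_t lives in the kernel's
 * mm headers and struct page is far more involved than shown here.
 */
#include <stdio.h>

typedef struct {
	unsigned long f;
} memdesc_flags_t;

/* Hypothetical stand-in for the kernel's flag layout. */
#define MY_NODES_SHIFT	((sizeof(unsigned long) * 8) - 8)	/* node ID in the top 8 bits */

struct page {
	memdesc_flags_t flags;		/* was: unsigned long flags; */
	/* ...other fields elided... */
};

/*
 * A helper like this can be offered to anyone holding a page/folio/slab:
 * it takes the wrapped type, so passing a random unsigned long no longer
 * compiles.
 */
static int sketch_page_nid(memdesc_flags_t flags)
{
	return (int)(flags.f >> MY_NODES_SHIFT);
}

int main(void)
{
	struct page page = { .flags = { .f = 3UL << MY_NODES_SHIFT } };

	/* Old code wrote page.flags; with the typedef it must be page.flags.f. */
	printf("node id: %d\n", sketch_page_nid(page.flags));
	return 0;
}

In the file below, the same .f access shows up in folio_xchg_last_cpupid(), where READ_ONCE() and try_cmpxchg() now operate on folio->flags.f.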
		
			
				
	
	
		
mm/mmzone.c · 113 lines · 2.6 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);
	zswap_lruvec_state_init(lruvec);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);

	lru_gen_init_lruvec(lruvec);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(folio->flags.f);
	do {
		flags = old_flags;
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&folio->flags.f, &old_flags, flags)));

	return last_cpupid;
}
#endif