Mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 00:28:52 +02:00)

commit e7a5f249e6

If kswapd fails to reclaim pages from a node MAX_RECLAIM_RETRIES times in a
row, kswapd on that node gets disabled. That is, the system won't wake up
kswapd for that node until page reclamation is observed at least once.
That reclamation is mostly done by direct reclaim, which in turn re-enables
kswapd.
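For illustration, the gating works roughly as in the sketch below (a
simplification, not the literal mm/vmscan.c code; the *_sketch helpers are
made-up names, while kswapd_failures, MAX_RECLAIM_RETRIES and kswapd_wait
are the real mm-internal symbols):

   /* Sketch: a wakeup is skipped once the failure streak is too long. */
   static void wakeup_kswapd_sketch(pg_data_t *pgdat)
   {
           if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES)
                   return;         /* node treated as hopeless; kswapd stays asleep */
           wake_up_interruptible(&pgdat->kswapd_wait);
   }

   /* Sketch: observed reclaim progress (usually direct reclaim) clears the streak. */
   static void note_reclaim_progress_sketch(pg_data_t *pgdat)
   {
           atomic_set(&pgdat->kswapd_failures, 0);
   }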
However, on systems with CXL memory nodes, workloads with high anon page
usage can disable kswapd indefinitely without ever triggering direct
reclaim. This can be reproduced with the following steps:
   numa node 0   (32GB memory, 48 CPUs)
   numa node 2~5 (512GB CXL memory, 128GB each)
   (numa node 1 is disabled)
   swap space 8GB
   1) Set /sys/kernel/mm/demotion_enabled to 0.
   2) Set /proc/sys/kernel/numa_balancing to 0.
   3) Run a process that allocates and randomly accesses 500GB of anon
      pages (a minimal sketch of such a process follows these steps).
   4) Let the process exit normally.
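A minimal userspace sketch of the allocator used in step 3 (illustrative
only; any program that maps roughly 500GB of anonymous memory and dirties
random pages in it will do):

   /* Sketch: map a large anonymous region and dirty random pages in it. */
   #include <stdlib.h>
   #include <sys/mman.h>

   int main(void)
   {
           size_t size = 500UL << 30;      /* 500GB of anon memory */
           unsigned char *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                                   -1, 0);

           if (p == MAP_FAILED)
                   return 1;

           for (size_t i = 0; i < (size >> 12); i++) {
                   /* pick a (roughly) random offset and write to that page */
                   size_t off = (((size_t)rand() << 31) ^ (size_t)rand()) % size;
                   p[off] = 1;
           }
           return 0;       /* step 4: exit normally */
   }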
During 3), free memory on node 0 drops below the low watermark, so kswapd
runs and depletes the swap space. kswapd then fails repeatedly and gets
disabled. Subsequent allocations are satisfied from CXL memory, so node 0
never builds up enough memory pressure to trigger direct reclaim.
After 4), kswapd on node 0 remains disabled and tasks running on that node
are unable to swap. Turning on NUMA_BALANCING_MEMORY_TIERING and demotion
at this point does not work properly either, since kswapd is still disabled.
To mitigate this problem, reset kswapd_failures to 0 under the following
conditions:
   a) The ZONE_BELOW_HIGH bit of a zone in a hopeless node that has a
      fallback memory node gets cleared.
   b) demotion_enabled is changed from false to true.
Rationale for a):
   The ZONE_BELOW_HIGH bit being cleared may be a sign that the node could
   become reclaimable again. This won't help much if the memory-hungry
   process keeps running without freeing anything, but at least the node
   will return to a reclaimable state once the process exits.
Rationale for b):
   When demotion_enabled is false, kswapd can only reclaim anon pages by
   swapping them out to swap space. Once demotion_enabled is turned on,
   kswapd can instead demote anon pages to another node for reclaiming, so
   the failure count accumulated without demotion is no longer a valid
   measure of reclaimability.
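As an illustration of condition b), the reset could look roughly like the
sketch below (the helper name is made up; for_each_online_node(),
NODE_DATA() and the atomic_t counter are the real kernel interfaces):

   /* Sketch: when demotion is switched from off to on, forget the stale
    * failure streaks so kswapd can be woken on those nodes again. */
   static void reset_kswapd_failures_sketch(void)
   {
           int nid;

           for_each_online_node(nid)
                   atomic_set(&NODE_DATA(nid)->kswapd_failures, 0);
   }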
Since a reset of kswapd_failures could otherwise be lost to a concurrent ++
operation, the counter is changed from int to atomic_t.
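Roughly, the accesses become (an illustrative before/after, not the literal
diff):

   /* before: plain int; a concurrent reset can be overwritten by the
    * load/add/store of the ++ */
   pgdat->kswapd_failures++;

   /* after: atomic_t; a concurrent atomic_set(..., 0) is not lost */
   atomic_inc(&pgdat->kswapd_failures);
   atomic_set(&pgdat->kswapd_failures, 0);

The show_mem() code below already reads the counter with
atomic_read(&pgdat->kswapd_failures) when printing the per-node
"all_unreclaimable?" field.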
[akpm@linux-foundation.org: tweak whitespace]
Link: https://lkml.kernel.org/r/aL6qGi69jWXfPc4D@pcw-MS-7D22
Signed-off-by: Chanwon Park <flyinrm@gmail.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
461 lines, 13 KiB, C

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		managed_pages += zone_managed_pages(zone);
		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}

	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * no node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}

static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;
	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		0UL,
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			" Balloon:%lukB"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			str_yes_no(atomic_read(&pgdat->kswapd_failures) >=
				   MAX_RECLAIM_RETRIES),
			K(node_page_state(pgdat, NR_BALLOON_PAGES)));
	}

	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%luKB"
			" free_highatomic:%luKB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" zspages:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone->nr_free_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
#if IS_ENABLED(CONFIG_ZSMALLOC)
			K(zone_page_state(zone, NR_ZSPAGES)),
#else
			0UL,
#endif
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			0UL,
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
		unsigned char types[NR_PAGE_ORDERS];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {

		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	static DEFINE_SPINLOCK(mem_alloc_profiling_spinlock);

	if (spin_trylock(&mem_alloc_profiling_spinlock)) {
		struct codetag_bytes tags[10];
		size_t i, nr;

		nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
		if (nr) {
			pr_notice("Memory allocations (profiling is currently turned %s):\n",
				mem_alloc_profiling_enabled() ? "on" : "off");
			for (i = 0; i < nr; i++) {
				struct codetag *ct = tags[i].ct;
				struct alloc_tag *tag = ct_to_alloc_tag(ct);
				struct alloc_tag_counters counter = alloc_tag_read(tag);
				char bytes[10];

				string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));

				/* Same as alloc_tag_to_text() but w/o intermediate buffer */
				if (ct->modname)
					pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->modname, ct->function);
				else
					pr_notice("%12s %8llu %s:%u func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->function);
			}
		}
		spin_unlock(&mem_alloc_profiling_spinlock);
	}
#endif
}
 |