mm/sparsemem: support sub-section hotplug

The libnvdimm sub-system has suffered a series of hacks and broken
workarounds for the memory-hotplug implementation's awkward
section-aligned (128MB) granularity.

For example, the following backtrace is emitted when attempting
arch_add_memory() with physical address ranges that intersect 'System
RAM' (RAM) with 'Persistent Memory' (PMEM) within a given section:
    # cat /proc/iomem | grep -A1 -B1 Persistent\ Memory
    100000000-1ffffffff : System RAM
    200000000-303ffffff : Persistent Memory (legacy)
    304000000-43fffffff : System RAM
    440000000-23ffffffff : Persistent Memory
    2400000000-43bfffffff : Persistent Memory
      2400000000-43bfffffff : namespace2.0
    WARNING: CPU: 38 PID: 928 at arch/x86/mm/init_64.c:850 add_pages+0x5c/0x60
    [..]
    RIP: 0010:add_pages+0x5c/0x60
    [..]
    Call Trace:
     devm_memremap_pages+0x460/0x6e0
     pmem_attach_disk+0x29e/0x680 [nd_pmem]
     ? nd_dax_probe+0xfc/0x120 [libnvdimm]
     nvdimm_bus_probe+0x66/0x160 [libnvdimm]
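
To see the collision concretely: x86_64 memory sections span 128MB
(1 << 27 bytes). The stand-alone sketch below is illustrative only and
not part of the patch; SECTION_NR is a local stand-in for the kernel's
pfn_to_section_nr(), operating on physical addresses rather than pfns.
It shows that the end of the legacy PMEM range and the start of the
following 'System RAM' range land in the same section:

    #include <stdio.h>

    /* Assumes the x86_64 SPARSEMEM geometry: 128MB sections. */
    #define SECTION_SIZE_BITS 27
    #define SECTION_NR(phys)  ((unsigned long long)(phys) >> SECTION_SIZE_BITS)

    int main(void)
    {
        /* Ranges taken from the /proc/iomem listing above. */
        unsigned long long pmem_legacy_end = 0x303ffffffULL;
        unsigned long long sysram_start    = 0x304000000ULL;

        /* Both print 96: the 0x300000000-0x307ffffff section holds
         * the tail of the PMEM range and the head of the RAM range,
         * so section-granularity hotplug of one clobbers the other. */
        printf("PMEM end:  section %llu\n", SECTION_NR(pmem_legacy_end));
        printf("RAM start: section %llu\n", SECTION_NR(sysram_start));
        return 0;
    }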

It was discovered that the problem goes beyond RAM vs PMEM collisions:
some platforms produce PMEM vs PMEM collisions within a given section.
The libnvdimm workaround for that case revealed that the libnvdimm
section-alignment-padding implementation has been broken for a long
while.

A fix for that long-standing breakage would introduce as many problems
as it solves, since it would require a backward-incompatible change to
the namespace metadata interpretation.  Instead of going down that
dubious route [1], address the root problem in the memory-hotplug
implementation.

Note that EEXIST is no longer treated as success, as that is now how
sparse_add_section() reports subsection collisions.  Treating it as
success was also obviated by recent changes that perform the
request_region() for 'System RAM' before arch_add_memory() in the
add_memory() sequence.

[1] https://lore.kernel.org/r/155000671719.348031.2347363160141119237.stgit@dwillia2-desk3.amr.corp.intel.com
[osalvador@suse.de: fix deactivate_section for early sections]
  Link: http://lkml.kernel.org/r/20190715081549.32577-2-osalvador@suse.de
Link: http://lkml.kernel.org/r/156092354368.979959.6232443923440952359.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	[ppc64]
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit ba72b4c8cf (parent 7ea6216049)

4 changed files with 141 additions and 96 deletions

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
@@ -348,7 +348,7 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
-extern void sparse_remove_one_section(struct mem_section *ms,
+extern void sparse_remove_section(struct mem_section *ms,
 		unsigned long pfn, unsigned long nr_pages,
 		unsigned long map_offset, struct vmem_altmap *altmap);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
@@ -252,18 +252,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
-static int __meminit __add_section(int nid, unsigned long pfn,
-		unsigned long nr_pages,	struct vmem_altmap *altmap)
-{
-	int ret;
-
-	if (pfn_valid(pfn))
-		return -EEXIST;
-
-	ret = sparse_add_section(nid, pfn, nr_pages, altmap);
-	return ret < 0 ? ret : 0;
-}
-
 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
 		const char *reason)
 {
@@ -327,18 +315,11 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 
 		pfns = min(nr_pages, PAGES_PER_SECTION
 				- (pfn & ~PAGE_SECTION_MASK));
-		err = __add_section(nid, pfn, pfns, altmap);
+		err = sparse_add_section(nid, pfn, pfns, altmap);
+		if (err)
+			break;
 		pfn += pfns;
 		nr_pages -= pfns;
-
-		/*
-		 * EEXIST is finally dealt with by ioresource collision
-		 * check. see add_memory() => register_memory_resource()
-		 * Warning will be printed if there is collision.
-		 */
-		if (err && (err != -EEXIST))
-			break;
-		err = 0;
 		cond_resched();
 	}
 	vmemmap_populate_print_last();
@@ -541,7 +522,7 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
 		return;
 
 	__remove_zone(zone, pfn, nr_pages);
-	sparse_remove_one_section(ms, pfn, nr_pages, map_offset, altmap);
+	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
 }
 
 /**

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
@@ -5974,7 +5974,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 		 * pfn out of zone.
 		 *
 		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
-		 * because this is done early in sparse_add_one_section
+		 * because this is done early in section_activate()
 		 */
 		if (!(pfn & (pageblock_nr_pages - 1))) {
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);

diff --git a/mm/sparse.c b/mm/sparse.c
@@ -83,8 +83,15 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
 	struct mem_section *section;
 
+	/*
+	 * An existing section is possible in the sub-section hotplug
+	 * case. First hot-add instantiates, follow-on hot-add reuses
+	 * the existing section.
+	 *
+	 * The mem_hotplug_lock resolves the apparent race below.
+	 */
 	if (mem_section[root])
-		return -EEXIST;
+		return 0;
 
 	section = sparse_index_alloc(nid);
 	if (!section)
@@ -715,10 +722,120 @@ static void free_map_bootmem(struct page *memmap)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+		struct vmem_altmap *altmap)
+{
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+	struct mem_section *ms = __pfn_to_section(pfn);
+	bool section_is_early = early_section(ms);
+	struct page *memmap = NULL;
+	unsigned long *subsection_map = ms->usage
+		? &ms->usage->subsection_map[0] : NULL;
+
+	subsection_mask_set(map, pfn, nr_pages);
+	if (subsection_map)
+		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+				"section already deactivated (%#lx + %ld)\n",
+				pfn, nr_pages))
+		return;
+
+	/*
+	 * There are 3 cases to handle across two configurations
+	 * (SPARSEMEM_VMEMMAP={y,n}):
+	 *
+	 * 1/ deactivation of a partial hot-added section (only possible
+	 * in the SPARSEMEM_VMEMMAP=y case).
+	 *    a/ section was present at memory init
+	 *    b/ section was hot-added post memory init
+	 * 2/ deactivation of a complete hot-added section
+	 * 3/ deactivation of a complete section from memory init
+	 *
+	 * For 1/, when subsection_map does not empty we will not be
+	 * freeing the usage map, but still need to free the vmemmap
+	 * range.
+	 *
+	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
+	 */
+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
+		unsigned long section_nr = pfn_to_section_nr(pfn);
+
+		if (!section_is_early) {
+			kfree(ms->usage);
+			ms->usage = NULL;
+		}
+		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+		ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr);
+	}
+
+	if (section_is_early && memmap)
+		free_map_bootmem(memmap);
+	else
+		depopulate_section_memmap(pfn, nr_pages, altmap);
+}
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
+{
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+	struct mem_section *ms = __pfn_to_section(pfn);
+	struct mem_section_usage *usage = NULL;
+	unsigned long *subsection_map;
+	struct page *memmap;
+	int rc = 0;
+
+	subsection_mask_set(map, pfn, nr_pages);
+
+	if (!ms->usage) {
+		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+		if (!usage)
+			return ERR_PTR(-ENOMEM);
+		ms->usage = usage;
+	}
+	subsection_map = &ms->usage->subsection_map[0];
+
+	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+		rc = -EINVAL;
+	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+		rc = -EEXIST;
+	else
+		bitmap_or(subsection_map, map, subsection_map,
+				SUBSECTIONS_PER_SECTION);
+
+	if (rc) {
+		if (usage)
+			ms->usage = NULL;
+		kfree(usage);
+		return ERR_PTR(rc);
+	}
+
+	/*
+	 * The early init code does not consider partially populated
+	 * initial sections, it simply assumes that memory will never be
+	 * referenced.  If we hot-add memory into such a section then we
+	 * do not need to populate the memmap and can simply reuse what
+	 * is already there.
+	 */
+	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
+		return pfn_to_page(pfn);
+
+	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
+	if (!memmap) {
+		section_deactivate(pfn, nr_pages, altmap);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return memmap;
+}
+
 /**
- * sparse_add_one_section - add a memory section
+ * sparse_add_section - add a memory section, or populate an existing one
  * @nid: The node to add section on
  * @start_pfn: start pfn of the memory range
  * @nr_pages: number of pfns to add in the section
  * @altmap: device page map
  *
  * This is only intended for hotplug.
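
The bookkeeping above hinges on subsection_mask_set() turning a
(pfn, nr_pages) range into a bitmap of 2MB subsections within one
128MB section. Below is a minimal user-space sketch of that arithmetic
(not the kernel code): it assumes the x86_64 constants (PAGE_SHIFT 12,
SUBSECTION_SHIFT 21, SECTION_SIZE_BITS 27, hence 64 subsections per
section) and substitutes a plain 64-bit word for the kernel's
DECLARE_BITMAP():

    #include <stdio.h>

    #define PAGE_SHIFT              12
    #define SUBSECTION_SHIFT        21
    #define SECTION_SIZE_BITS       27
    #define PFN_SUBSECTION_SHIFT    (SUBSECTION_SHIFT - PAGE_SHIFT)     /* 9 */
    #define SUBSECTIONS_PER_SECTION \
            (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))             /* 64 */

    /* Mirrors the kernel's subsection_mask_set(), using a u64 bitmap. */
    static unsigned long long subsection_mask_set(unsigned long pfn,
                    unsigned long nr_pages)
    {
            unsigned long idx = (pfn >> PFN_SUBSECTION_SHIFT)
                            % SUBSECTIONS_PER_SECTION;
            unsigned long end = ((pfn + nr_pages - 1) >> PFN_SUBSECTION_SHIFT)
                            % SUBSECTIONS_PER_SECTION;
            unsigned long nr_bits = end - idx + 1;

            /* nr_bits consecutive bits, starting at bit idx */
            return ((nr_bits < 64 ? (1ULL << nr_bits) : 0) - 1) << idx;
    }

    int main(void)
    {
            /* Hot-add 16MB (4096 pages) starting 2MB (512 pages) into a
             * section: subsection bits 1 through 8 are set => 0x1fe. */
            printf("subsection_map: %#llx\n", subsection_mask_set(512, 4096));
            return 0;
    }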
@@ -732,51 +849,34 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
-	struct mem_section_usage *usage;
 	struct mem_section *ms;
 	struct page *memmap;
 	int ret;
 
-	/*
-	 * no locking for this, because it does its own
-	 * plus, it does a kmalloc
-	 */
 	ret = sparse_index_init(section_nr, nid);
-	if (ret < 0 && ret != -EEXIST)
+	if (ret < 0)
 		return ret;
-	ret = 0;
-	memmap = populate_section_memmap(start_pfn, PAGES_PER_SECTION, nid,
-			altmap);
-	if (!memmap)
-		return -ENOMEM;
-	usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
-	if (!usage) {
-		depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
-		return -ENOMEM;
-	}
 
-	ms = __pfn_to_section(start_pfn);
-	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
-		ret = -EEXIST;
-		goto out;
-	}
+	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
+	if (IS_ERR(memmap))
+		return PTR_ERR(memmap);
 
 	/*
 	 * Poison uninitialized struct pages in order to catch invalid flags
 	 * combinations.
 	 */
-	page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);
+	page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
 
+	ms = __pfn_to_section(start_pfn);
 	set_section_nid(section_nr, nid);
 	section_mark_present(ms);
-	sparse_init_one_section(ms, section_nr, memmap, usage, 0);
 
-out:
-	if (ret < 0) {
-		kfree(usage);
-		depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
-	}
-	return ret;
+	/* Align memmap to section boundary in the subsection case */
+	if (section_nr_to_pfn(section_nr) != start_pfn)
+		memmap = pfn_to_kaddr(section_nr_to_pfn(section_nr));
+	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
+
+	return 0;
 }
 
 #ifdef CONFIG_MEMORY_FAILURE
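
One subtle step above is the memmap rebase: sparse_init_one_section()
stores the memmap encoded relative to the section's first pfn (see
sparse_encode_mem_map()), but when only a subsection was activated the
pointer returned by section_activate() corresponds to start_pfn, not
the section boundary, hence the pfn_to_kaddr(section_nr_to_pfn(...))
adjustment. A rough sketch of the invariant, assuming x86_64 numbers
(64-byte struct page, 32768 pages per section) and a hypothetical
memmap address:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_STRUCT_SIZE        64UL    /* sizeof(struct page), assumed */
    #define PAGES_PER_SECTION       32768UL /* 128MB / 4KB */

    int main(void)
    {
            unsigned long section_nr = 96;  /* section from the example above */
            unsigned long offset = 512;     /* subsection starts 512 pfns in */
            unsigned long start_pfn = section_nr * PAGES_PER_SECTION + offset;
            uintptr_t memmap = 0xffffea0000000000UL; /* hypothetical map for start_pfn */

            /* Rebase to the section boundary, as the patch does with
             * pfn_to_kaddr(section_nr_to_pfn(section_nr)). */
            uintptr_t section_memmap = memmap - offset * PAGE_STRUCT_SIZE;

            /* The decode invariant: base + (pfn - section start) recovers
             * the struct page address for any pfn in the populated range. */
            uintptr_t page = section_memmap +
                    (start_pfn - section_nr * PAGES_PER_SECTION) * PAGE_STRUCT_SIZE;

            printf("round-trip ok: %s\n", page == memmap ? "yes" : "no");
            return 0;
    }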
@@ -809,48 +909,12 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
 
-static void free_section_usage(struct mem_section *ms, struct page *memmap,
-		struct mem_section_usage *usage, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
-{
-	if (!usage)
-		return;
-
-	/*
-	 * Check to see if allocation came from hot-plug-add
-	 */
-	if (!early_section(ms)) {
-		kfree(usage);
-		if (memmap)
-			depopulate_section_memmap(pfn, nr_pages, altmap);
-		return;
-	}
-
-	/*
-	 * The usemap came from bootmem. This is packed with other usemaps
-	 * on the section which has pgdat at boot time. Just keep it as is now.
-	 */
-
-	if (memmap)
-		free_map_bootmem(memmap);
-}
-
-void sparse_remove_one_section(struct mem_section *ms, unsigned long pfn,
+void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
 		unsigned long nr_pages, unsigned long map_offset,
 		struct vmem_altmap *altmap)
 {
-	struct page *memmap = NULL;
-	struct mem_section_usage *usage = NULL;
-
-	if (ms->section_mem_map) {
-		usage = ms->usage;
-		memmap = sparse_decode_mem_map(ms->section_mem_map,
-						__section_nr(ms));
-		ms->section_mem_map = 0;
-		ms->usage = NULL;
-	}
-
-	clear_hwpoisoned_pages(memmap + map_offset, nr_pages - map_offset);
-	free_section_usage(ms, memmap, usage, pfn, nr_pages, altmap);
+	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
+			nr_pages - map_offset);
+	section_deactivate(pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */