mm/memremap_pages: support multiple ranges per invocation

In support of device-dax growing the ability to front physically
discontiguous ranges of memory, update devm_memremap_pages() to track
multiple ranges with a single reference counter and devm instance.

Convert all [devm_]memremap_pages() users to specify the number of
ranges they are mapping in their 'struct dev_pagemap' instance.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jia He <justin.he@arm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lkml.kernel.org/r/159643103789.4062302.18426128170217903785.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106116293.30709.13350662794915396198.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4574f63ed
commit b7b3c01b19

10 changed files with 174 additions and 118 deletions
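A reader's guide to the conversions below: a single-range user now sets 'nr_range = 1' next to its existing 'range' assignment, while a multi-range user populates the new trailing 'ranges[]' array. The following is a minimal sketch of the multi-range usage this enables. The function and variable names are hypothetical (device-dax becomes the first real multi-range user in a follow-on change); only devm_memremap_pages(), struct dev_pagemap, nr_range, and ranges[] come from this patch.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/memremap.h>
	#include <linux/overflow.h>
	#include <linux/range.h>
	#include <linux/slab.h>

	/* Hypothetical driver code: map two physically discontiguous
	 * ranges behind one devm instance and one reference counter.
	 */
	static void *map_two_ranges(struct device *dev, const struct range *r0,
				    const struct range *r1)
	{
		struct dev_pagemap *pgmap;

		/*
		 * The union in struct dev_pagemap already embeds storage
		 * for ranges[0], so only nr_range - 1 extra elements need
		 * to be allocated.
		 */
		pgmap = devm_kzalloc(dev, struct_size(pgmap, ranges, 1),
				     GFP_KERNEL);
		if (!pgmap)
			return ERR_PTR(-ENOMEM);

		pgmap->nr_range = 2;
		pgmap->type = MEMORY_DEVICE_GENERIC;
		pgmap->ranges[0] = *r0;
		pgmap->ranges[1] = *r1;

		/* on success this returns __va(pgmap->ranges[0].start) */
		return devm_memremap_pages(dev, pgmap);
	}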
arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -1172,6 +1172,7 @@ int kvmppc_uvmem_init(void)
 	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
 	kvmppc_uvmem_pgmap.range.start = res->start;
 	kvmppc_uvmem_pgmap.range.end = res->end;
+	kvmppc_uvmem_pgmap.nr_range = 1;
 	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
 	/* just one global instance: */
 	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
drivers/dax/device.c
@@ -417,6 +417,7 @@ int dev_dax_probe(struct dev_dax *dev_dax)
 		if (!pgmap)
 			return -ENOMEM;
 		pgmap->range = *range;
+		pgmap->nr_range = 1;
 	}
 	pgmap->type = MEMORY_DEVICE_GENERIC;
 	addr = devm_memremap_pages(dev, pgmap);
drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -251,6 +251,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
 	chunk->pagemap.range.start = res->start;
 	chunk->pagemap.range.end = res->end;
+	chunk->pagemap.nr_range = 1;
 	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
 	chunk->pagemap.owner = drm->dev;
 
drivers/nvdimm/pfn_devs.c
@@ -693,6 +693,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 		.start = nsio->res.start + start_pad,
 		.end = nsio->res.end - end_trunc,
 	};
+	pgmap->nr_range = 1;
 	if (nd_pfn->mode == PFN_MODE_RAM) {
 		if (offset < reserve)
 			return -EINVAL;
drivers/nvdimm/pmem.c
@@ -441,6 +441,7 @@ static int pmem_attach_disk(struct device *dev,
 	} else if (pmem_should_map_pages(dev)) {
 		pmem->pgmap.range.start = res->start;
 		pmem->pgmap.range.end = res->end;
+		pmem->pgmap.nr_range = 1;
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
 		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
drivers/pci/p2pdma.c
@@ -187,6 +187,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 	pgmap = &p2p_pgmap->pgmap;
 	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
 	pgmap->range.end = pgmap->range.start + size - 1;
+	pgmap->nr_range = 1;
 	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 
 	p2p_pgmap->provider = pdev;
drivers/xen/unpopulated-alloc.c
@@ -47,6 +47,7 @@ static int fill_list(unsigned int nr_pages)
 		.start = res->start,
 		.end = res->end,
 	};
+	pgmap->nr_range = 1;
 	pgmap->owner = res;
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
include/linux/memremap.h
@@ -94,7 +94,6 @@ struct dev_pagemap_ops {
 /**
  * struct dev_pagemap - metadata for ZONE_DEVICE mappings
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @range: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
  * @internal_ref: internal reference if @ref is not provided by the caller
  * @done: completion for @internal_ref
@@ -104,10 +103,12 @@ struct dev_pagemap_ops {
  * @owner: an opaque pointer identifying the entity that manages this
  *	instance.  Used by various helpers to make sure that no
  *	foreign ZONE_DEVICE memory is accessed.
+ * @nr_range: number of ranges to be mapped
+ * @range: range to be mapped when nr_range == 1
+ * @ranges: array of ranges to be mapped when nr_range > 1
  */
 struct dev_pagemap {
 	struct vmem_altmap altmap;
-	struct range range;
 	struct percpu_ref *ref;
 	struct percpu_ref internal_ref;
 	struct completion done;
@@ -115,6 +116,11 @@ struct dev_pagemap {
 	unsigned int flags;
 	const struct dev_pagemap_ops *ops;
 	void *owner;
+	int nr_range;
+	union {
+		struct range range;
+		struct range ranges[0];
+	};
 };
 
 static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
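The union above is the compatibility trick worth pausing on: 'range' and 'ranges[0]' alias the same storage, so every existing single-range initialization keeps working unchanged while new code can index 'ranges[]' uniformly. A hypothetical helper to illustrate the equivalence (not part of this patch; range_len() is the existing helper from linux/range.h):

	/* Illustrative only: total bytes covered by a pgmap, however it
	 * was filled in. pgmap->range and pgmap->ranges[0] share storage,
	 * so indexing ranges[] is correct for nr_range == 1 callers too.
	 */
	static u64 pgmap_total_len(const struct dev_pagemap *pgmap)
	{
		u64 len = 0;
		int i;

		for (i = 0; i < pgmap->nr_range; i++)
			len += range_len(&pgmap->ranges[i]);
		return len;
	}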
lib/test_hmm.c
@@ -472,6 +472,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
 	devmem->pagemap.range.start = res->start;
 	devmem->pagemap.range.end = res->end;
+	devmem->pagemap.nr_range = 1;
 	devmem->pagemap.ops = &dmirror_devmem_ops;
 	devmem->pagemap.owner = mdevice;
 
mm/memremap.c
@@ -77,15 +77,19 @@ static void pgmap_array_delete(struct range *range)
 	synchronize_rcu();
 }
 
-static unsigned long pfn_first(struct dev_pagemap *pgmap)
+static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
 {
-	return PHYS_PFN(pgmap->range.start) +
-		vmem_altmap_offset(pgmap_altmap(pgmap));
+	struct range *range = &pgmap->ranges[range_id];
+	unsigned long pfn = PHYS_PFN(range->start);
+
+	if (range_id)
+		return pfn;
+	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
 }
 
-static unsigned long pfn_end(struct dev_pagemap *pgmap)
+static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
 {
-	const struct range *range = &pgmap->range;
+	const struct range *range = &pgmap->ranges[range_id];
 
 	return (range->start + range_len(range)) >> PAGE_SHIFT;
 }
@@ -97,8 +101,8 @@ static unsigned long pfn_next(unsigned long pfn)
 	return pfn + 1;
 }
 
-#define for_each_device_pfn(pfn, map) \
-	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
+#define for_each_device_pfn(pfn, map, i) \
+	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
 
 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
 {
@@ -124,20 +128,14 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 		pgmap->ref = NULL;
 }
 
-void memunmap_pages(struct dev_pagemap *pgmap)
+static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 {
-	struct range *range = &pgmap->range;
+	struct range *range = &pgmap->ranges[range_id];
 	struct page *first_page;
-	unsigned long pfn;
 	int nid;
 
-	dev_pagemap_kill(pgmap);
-	for_each_device_pfn(pfn, pgmap)
-		put_page(pfn_to_page(pfn));
-	dev_pagemap_cleanup(pgmap);
-
 	/* make sure to access a memmap that was actually initialized */
-	first_page = pfn_to_page(pfn_first(pgmap));
+	first_page = pfn_to_page(pfn_first(pgmap, range_id));
 
 	/* pages are dead and unused, undo the arch mapping */
 	nid = page_to_nid(first_page);
@@ -157,6 +155,22 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 
 	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
 	pgmap_array_delete(range);
+}
+
+void memunmap_pages(struct dev_pagemap *pgmap)
+{
+	unsigned long pfn;
+	int i;
+
+	dev_pagemap_kill(pgmap);
+	for (i = 0; i < pgmap->nr_range; i++)
+		for_each_device_pfn(pfn, pgmap, i)
+			put_page(pfn_to_page(pfn));
+	dev_pagemap_cleanup(pgmap);
+
+	for (i = 0; i < pgmap->nr_range; i++)
+		pageunmap_range(pgmap, i);
 
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
 	devmap_managed_enable_put();
 }
@@ -175,6 +189,114 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 	complete(&pgmap->done);
 }
 
+static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
+		int range_id, int nid)
+{
+	struct range *range = &pgmap->ranges[range_id];
+	struct dev_pagemap *conflict_pgmap;
+	int error, is_ram;
+
+	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
+				"altmap not supported for multiple ranges\n"))
+		return -EINVAL;
+
+	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
+	if (conflict_pgmap) {
+		WARN(1, "Conflicting mapping in same section\n");
+		put_dev_pagemap(conflict_pgmap);
+		return -ENOMEM;
+	}
+
+	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
+	if (conflict_pgmap) {
+		WARN(1, "Conflicting mapping in same section\n");
+		put_dev_pagemap(conflict_pgmap);
+		return -ENOMEM;
+	}
+
+	is_ram = region_intersects(range->start, range_len(range),
+		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+
+	if (is_ram != REGION_DISJOINT) {
+		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
+				is_ram == REGION_MIXED ? "mixed" : "ram",
+				range->start, range->end);
+		return -ENXIO;
+	}
+
+	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
+				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
+	if (error)
+		return error;
+
+	if (nid < 0)
+		nid = numa_mem_id();
+
+	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
+			range_len(range));
+	if (error)
+		goto err_pfn_remap;
+
+	mem_hotplug_begin();
+
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory. More-
+	 * over the device memory is un-accessible thus we do not want to
+	 * create a linear mapping for the memory like arch_add_memory()
+	 * would do.
+	 *
+	 * For all other device memory types, which are accessible by
+	 * the CPU, we do want the linear mapping and thus use
+	 * arch_add_memory().
+	 */
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		error = add_pages(nid, PHYS_PFN(range->start),
+				PHYS_PFN(range_len(range)), params);
+	} else {
+		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
+		if (error) {
+			mem_hotplug_done();
+			goto err_kasan;
+		}
+
+		error = arch_add_memory(nid, range->start, range_len(range),
+					params);
+	}
+
+	if (!error) {
+		struct zone *zone;
+
+		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
+				PHYS_PFN(range_len(range)), params->altmap);
+	}
+
+	mem_hotplug_done();
+	if (error)
+		goto err_add_memory;
+
+	/*
+	 * Initialization of the pages has been deferred until now in order
+	 * to allow us to do the work while not holding the hotplug lock.
+	 */
+	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+				PHYS_PFN(range->start),
+				PHYS_PFN(range_len(range)), pgmap);
+	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
+			- pfn_first(pgmap, range_id));
+	return 0;
+
+err_add_memory:
+	kasan_remove_zero_shadow(__va(range->start), range_len(range));
+err_kasan:
+	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+err_pfn_remap:
+	pgmap_array_delete(range);
+	return error;
+}
+
 
 /*
  * Not device managed version of dev_memremap_pages, undone by
  * memunmap_pages().  Please use dev_memremap_pages if you have a struct
@@ -182,17 +304,16 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
  */
 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 {
-	struct range *range = &pgmap->range;
-	struct dev_pagemap *conflict_pgmap;
 	struct mhp_params params = {
 		/*
 		 * We do not want any optional features only our own memmap
 		 */
 		.altmap = pgmap_altmap(pgmap),
 		.pgprot = PAGE_KERNEL,
 	};
-	int error, is_ram;
+	const int nr_range = pgmap->nr_range;
 	bool need_devmap_managed = true;
+	int error, i;
+
+	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
+		return ERR_PTR(-EINVAL);
 
 	switch (pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
@@ -251,107 +372,28 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 			return ERR_PTR(error);
 	}
 
-	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
-	if (conflict_pgmap) {
-		WARN(1, "Conflicting mapping in same section\n");
-		put_dev_pagemap(conflict_pgmap);
-		error = -ENOMEM;
-		goto err_array;
-	}
-
-	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
-	if (conflict_pgmap) {
-		WARN(1, "Conflicting mapping in same section\n");
-		put_dev_pagemap(conflict_pgmap);
-		error = -ENOMEM;
-		goto err_array;
-	}
-
-	is_ram = region_intersects(range->start, range_len(range),
-		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
-
-	if (is_ram != REGION_DISJOINT) {
-		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
-				is_ram == REGION_MIXED ? "mixed" : "ram",
-				range->start, range->end);
-		error = -ENXIO;
-		goto err_array;
-	}
-
-	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
-				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
-	if (error)
-		goto err_array;
-
-	if (nid < 0)
-		nid = numa_mem_id();
-
-	error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(range->start), 0,
-			range_len(range));
-	if (error)
-		goto err_pfn_remap;
-
-	mem_hotplug_begin();
-
 	/*
-	 * For device private memory we call add_pages() as we only need to
-	 * allocate and initialize struct page for the device memory. More-
-	 * over the device memory is un-accessible thus we do not want to
-	 * create a linear mapping for the memory like arch_add_memory()
-	 * would do.
-	 *
-	 * For all other device memory types, which are accessible by
-	 * the CPU, we do want the linear mapping and thus use
-	 * arch_add_memory().
+	 * Clear the pgmap nr_range as it will be incremented for each
+	 * successfully processed range. This communicates how many
+	 * regions to unwind in the abort case.
 	 */
-	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-		error = add_pages(nid, PHYS_PFN(range->start),
-				PHYS_PFN(range_len(range)), &params);
-	} else {
-		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
-		if (error) {
-			mem_hotplug_done();
-			goto err_kasan;
-		}
-
-		error = arch_add_memory(nid, range->start, range_len(range),
-					&params);
-	}
-
-	if (!error) {
-		struct zone *zone;
-
-		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
-		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
-				PHYS_PFN(range_len(range)), params.altmap);
-	}
-
-	mem_hotplug_done();
-	if (error)
-		goto err_add_memory;
-
-	/*
-	 * Initialization of the pages has been deferred until now in order
-	 * to allow us to do the work while not holding the hotplug lock.
-	 */
-	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-				PHYS_PFN(range->start),
-				PHYS_PFN(range_len(range)), pgmap);
-	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-	return __va(range->start);
-
- err_add_memory:
-	kasan_remove_zero_shadow(__va(range->start), range_len(range));
- err_kasan:
-	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
- err_pfn_remap:
-	pgmap_array_delete(range);
- err_array:
-	dev_pagemap_kill(pgmap);
-	dev_pagemap_cleanup(pgmap);
-	devmap_managed_enable_put();
-	return ERR_PTR(error);
+	pgmap->nr_range = 0;
+	error = 0;
+	for (i = 0; i < nr_range; i++) {
+		error = pagemap_range(pgmap, &params, i, nid);
+		if (error)
+			break;
+		pgmap->nr_range++;
+	}
 
+	if (i < nr_range) {
+		memunmap_pages(pgmap);
+		pgmap->nr_range = nr_range;
+		return ERR_PTR(error);
+	}
+
+	return __va(pgmap->ranges[0].start);
 }
 EXPORT_SYMBOL_GPL(memremap_pages);
 
 /**
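One behavioral note on the rewritten memremap_pages(): because nr_range is cleared and then re-incremented per successfully mapped range, a mid-loop failure leaves exactly the mapped count for memunmap_pages() to unwind, so the caller sees an all-or-nothing result. A hedged sketch of that contract from the caller's side (the driver function name is hypothetical):

	/* Hypothetical caller: either every range in pgmap is mapped and a
	 * kernel virtual address for ranges[0].start comes back, or the
	 * core has already unwound any partially mapped ranges and an
	 * ERR_PTR is returned.
	 */
	static int my_driver_attach(struct device *dev, struct dev_pagemap *pgmap)
	{
		void *addr = devm_memremap_pages(dev, pgmap);

		if (IS_ERR(addr))
			return PTR_ERR(addr);	/* nothing for the driver to unwind */

		dev_info(dev, "mapped %d range(s) at %p\n", pgmap->nr_range, addr);
		return 0;
	}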