	memremap: provide an optional internal refcount in struct dev_pagemap
Provide an internal refcounting logic if no ->ref field is provided in the
pagemap passed into devm_memremap_pages so that callers don't have to
reinvent it poorly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
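[Editor's note: a minimal sketch of what the new calling convention looks like. This is not part of the commit; the function and variable names are invented for illustration. With the internal refcount available, a caller only fills in res and type and leaves pgmap->ref and pgmap->ops unset.]

	/* Hypothetical caller sketch -- not from this commit. */
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/memremap.h>

	static void *example_map_pages(struct device *dev, struct resource *res)
	{
		struct dev_pagemap *pgmap;

		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
		if (!pgmap)
			return ERR_PTR(-ENOMEM);

		/*
		 * Only res and type are required now; leaving pgmap->ref and
		 * pgmap->ops NULL selects the internal refcount added by this
		 * commit, so no kill/cleanup boilerplate is needed here.
		 */
		pgmap->res = *res;
		pgmap->type = MEMORY_DEVICE_DEVDAX;

		return devm_memremap_pages(dev, pgmap);
	}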
This commit is contained in:

parent 514caf23a7
commit 24917f6b10
3 changed files with 100 additions and 24 deletions
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -95,6 +95,8 @@ struct dev_pagemap_ops {
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
  * @res: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
+ * @internal_ref: internal reference if @ref is not provided by the caller
+ * @done: completion for @internal_ref
  * @dev: host device of the mapping for debug
  * @data: private data pointer for page_free()
  * @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -105,6 +107,8 @@ struct dev_pagemap {
 	struct vmem_altmap altmap;
 	struct resource res;
 	struct percpu_ref *ref;
+	struct percpu_ref internal_ref;
+	struct completion done;
 	struct device *dev;
 	enum memory_type type;
 	unsigned int flags;
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -29,7 +29,7 @@ static void devmap_managed_enable_put(void *data)
 
 static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 {
-	if (!pgmap->ops->page_free) {
+	if (!pgmap->ops || !pgmap->ops->page_free) {
 		WARN(1, "Missing page_free method\n");
 		return -EINVAL;
 	}
@@ -75,6 +75,24 @@ static unsigned long pfn_next(unsigned long pfn)
 #define for_each_device_pfn(pfn, map) \
 	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
 
+static void dev_pagemap_kill(struct dev_pagemap *pgmap)
+{
+	if (pgmap->ops && pgmap->ops->kill)
+		pgmap->ops->kill(pgmap);
+	else
+		percpu_ref_kill(pgmap->ref);
+}
+
+static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
+{
+	if (pgmap->ops && pgmap->ops->cleanup) {
+		pgmap->ops->cleanup(pgmap);
+	} else {
+		wait_for_completion(&pgmap->done);
+		percpu_ref_exit(pgmap->ref);
+	}
+}
+
 static void devm_memremap_pages_release(void *data)
 {
 	struct dev_pagemap *pgmap = data;
@@ -84,10 +102,10 @@ static void devm_memremap_pages_release(void *data)
 	unsigned long pfn;
 	int nid;
 
-	pgmap->ops->kill(pgmap);
+	dev_pagemap_kill(pgmap);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
-	pgmap->ops->cleanup(pgmap);
+	dev_pagemap_cleanup(pgmap);
 
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
@@ -114,20 +132,29 @@ static void devm_memremap_pages_release(void *data)
 		      "%s: failed to free all reserved pages\n", __func__);
 }
 
+static void dev_pagemap_percpu_release(struct percpu_ref *ref)
+{
+	struct dev_pagemap *pgmap =
+		container_of(ref, struct dev_pagemap, internal_ref);
+
+	complete(&pgmap->done);
+}
+
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
  * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
- * 1/ At a minimum the res, ref and type and ops members of @pgmap must be
- *    initialized by the caller before passing it to this function
+ * 1/ At a minimum the res and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
  *
  * 2/ The altmap field may optionally be initialized, in which case
  *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
  *
- * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
- *    at devm_memremap_pages_release() time, or if this routine fails.
+ * 3/ The ref field may optionally be provided, in which case pgmap->ref
+ *    must be 'live' on entry and will be killed and reaped at
+ *    devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -175,10 +202,21 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		break;
 	}
 
-	if (!pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
-	    !pgmap->ops->cleanup) {
-		WARN(1, "Missing reference count teardown definition\n");
-		return ERR_PTR(-EINVAL);
+	if (!pgmap->ref) {
+		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
+			return ERR_PTR(-EINVAL);
+
+		init_completion(&pgmap->done);
+		error = percpu_ref_init(&pgmap->internal_ref,
+				dev_pagemap_percpu_release, 0, GFP_KERNEL);
+		if (error)
+			return ERR_PTR(error);
+		pgmap->ref = &pgmap->internal_ref;
+	} else {
+		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
+			WARN(1, "Missing reference count teardown definition\n");
+			return ERR_PTR(-EINVAL);
+		}
 	}
 
 	if (need_devmap_managed) {
@@ -296,8 +334,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 err_pfn_remap:
 	pgmap_array_delete(res);
 err_array:
-	pgmap->ops->kill(pgmap);
-	pgmap->ops->cleanup(pgmap);
+	dev_pagemap_kill(pgmap);
+	dev_pagemap_cleanup(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
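[Editor's note: by contrast, a caller that keeps managing its own percpu_ref must now supply both ops, since devm_memremap_pages() rejects a caller-supplied ref whose ops lack kill or cleanup. A hypothetical sketch of such a legacy-style caller follows; all "my_"/"example_" names are invented for illustration.]

	/* Hypothetical legacy-style caller sketch -- not from this commit. */
	#include <linux/completion.h>
	#include <linux/gfp.h>
	#include <linux/memremap.h>
	#include <linux/percpu-refcount.h>

	struct my_dev {
		struct dev_pagemap pgmap;
		struct percpu_ref ref;
		struct completion done;
	};

	static void my_ref_release(struct percpu_ref *ref)
	{
		struct my_dev *md = container_of(ref, struct my_dev, ref);

		complete(&md->done);
	}

	static void my_pgmap_kill(struct dev_pagemap *pgmap)
	{
		percpu_ref_kill(pgmap->ref);
	}

	static void my_pgmap_cleanup(struct dev_pagemap *pgmap)
	{
		struct my_dev *md = container_of(pgmap, struct my_dev, pgmap);

		wait_for_completion(&md->done);
		percpu_ref_exit(pgmap->ref);
	}

	static const struct dev_pagemap_ops my_pgmap_ops = {
		.kill		= my_pgmap_kill,
		.cleanup	= my_pgmap_cleanup,
	};

	static int example_setup(struct my_dev *md)
	{
		int rc;

		init_completion(&md->done);
		rc = percpu_ref_init(&md->ref, my_ref_release, 0, GFP_KERNEL);
		if (rc)
			return rc;

		/* A caller-supplied ref must arrive 'live', with both ops set. */
		md->pgmap.ref = &md->ref;
		md->pgmap.ops = &my_pgmap_ops;
		return 0;
	}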
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -100,26 +100,60 @@ static void nfit_test_kill(void *_pgmap)
 {
 	struct dev_pagemap *pgmap = _pgmap;
 
-	WARN_ON(!pgmap || !pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
-		!pgmap->ops->cleanup);
-	pgmap->ops->kill(pgmap);
-	pgmap->ops->cleanup(pgmap);
+	WARN_ON(!pgmap || !pgmap->ref);
+
+	if (pgmap->ops && pgmap->ops->kill)
+		pgmap->ops->kill(pgmap);
+	else
+		percpu_ref_kill(pgmap->ref);
+
+	if (pgmap->ops && pgmap->ops->cleanup) {
+		pgmap->ops->cleanup(pgmap);
+	} else {
+		wait_for_completion(&pgmap->done);
+		percpu_ref_exit(pgmap->ref);
+	}
+}
+
+static void dev_pagemap_percpu_release(struct percpu_ref *ref)
+{
+	struct dev_pagemap *pgmap =
+		container_of(ref, struct dev_pagemap, internal_ref);
+
+	complete(&pgmap->done);
 }
 
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
+	int error;
 	resource_size_t offset = pgmap->res.start;
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
-	if (nfit_res) {
-		int rc;
-
-		rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
-		if (rc)
-			return ERR_PTR(rc);
-		return nfit_res->buf + offset - nfit_res->res.start;
+	if (!nfit_res)
+		return devm_memremap_pages(dev, pgmap);
+
+	pgmap->dev = dev;
+	if (!pgmap->ref) {
+		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
+			return ERR_PTR(-EINVAL);
+
+		init_completion(&pgmap->done);
+		error = percpu_ref_init(&pgmap->internal_ref,
+				dev_pagemap_percpu_release, 0, GFP_KERNEL);
+		if (error)
+			return ERR_PTR(error);
+		pgmap->ref = &pgmap->internal_ref;
+	} else {
+		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
+			WARN(1, "Missing reference count teardown definition\n");
+			return ERR_PTR(-EINVAL);
+		}
 	}
-	return devm_memremap_pages(dev, pgmap);
+
+	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
+	if (error)
+		return ERR_PTR(error);
+	return nfit_res->buf + offset - nfit_res->res.start;
 }
 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);