libnvdimm: pmem label sets and namespace instantiation.

A complete label set is a PMEM-label per-dimm per-interleave-set where
all the UUIDs match and the interleave set cookie matches the hosting
interleave set.

Present sysfs attributes for manipulation of a PMEM-namespace's
'alt_name', 'uuid', and 'size' attributes.  A later patch will make
these settings persistent by writing back the label.

Note that PMEM allocations grow forwards from the start of an
interleave set (lowest dimm-physical-address (DPA)).  BLK-namespaces
that alias with a PMEM interleave set will grow allocations backward
from the highest DPA.

Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Neil Brown <neilb@suse.de>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
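As a rough illustration of how the new attributes could be exercised from
userspace once a PMEM namespace device exists (sketch only: the
"namespace0.0" device name and the /sys/bus/nd/devices layout are
assumptions for the example, and the uuid write only succeeds while the
namespace is disabled, i.e. no driver attached):

/* Hypothetical userspace sketch: set a uuid and read back the size of a
 * disabled PMEM namespace via the sysfs attributes described above.
 * Device name and sysfs paths are assumed for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *base = "/sys/bus/nd/devices/namespace0.0"; /* assumed */
	char path[256], buf[64];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/uuid", base);
	if (write_attr(path, "eaa5f0f9-9b69-4f44-a125-38cdbcb8a5d6\n"))
		perror("uuid");

	snprintf(path, sizeof(path), "%s/size", base);
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("size: %s", buf);
		}
		close(fd);
	}
	return 0;
}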
parent 4a826c83db
commit bf9bccc14c

15 changed files with 1506 additions and 31 deletions
@@ -97,6 +97,8 @@ static int nvdimm_bus_probe(struct device *dev)
 	rc = nd_drv->probe(dev);
 	if (rc == 0)
 		nd_region_probe_success(nvdimm_bus, dev);
+	else
+		nd_region_disable(nvdimm_bus, dev);
 	nvdimm_bus_probe_end(nvdimm_bus);
 
 	dev_dbg(&nvdimm_bus->dev, "%s.probe(%s) = %d\n", dev->driver->name,
@@ -381,8 +383,10 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
 }
 EXPORT_SYMBOL_GPL(nd_cmd_out_size);
 
-static void wait_nvdimm_bus_probe_idle(struct nvdimm_bus *nvdimm_bus)
+void wait_nvdimm_bus_probe_idle(struct device *dev)
 {
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+
 	do {
 		if (nvdimm_bus->probe_active == 0)
 			break;
@@ -402,7 +406,7 @@ static int nd_cmd_clear_to_send(struct nvdimm *nvdimm, unsigned int cmd)
 		return 0;
 
 	nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
-	wait_nvdimm_bus_probe_idle(nvdimm_bus);
+	wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
 
 	if (atomic_read(&nvdimm->busy))
 		return -EBUSY;

@@ -14,6 +14,7 @@
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/ctype.h>
 #include <linux/ndctl.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -109,6 +110,69 @@ struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
 	return NULL;
 }
 
+static bool is_uuid_sep(char sep)
+{
+	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
+		return true;
+	return false;
+}
+
+static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
+		size_t len)
+{
+	const char *str = buf;
+	u8 uuid[16];
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
+			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
+					__func__, i, str - buf, str[0],
+					str + 1 - buf, str[1]);
+			return -EINVAL;
+		}
+
+		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
+		str += 2;
+		if (is_uuid_sep(*str))
+			str++;
+	}
+
+	memcpy(uuid_out, uuid, sizeof(uuid));
+	return 0;
+}
+
+/**
+ * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
+ * @dev: container device for the uuid property
+ * @uuid_out: uuid buffer to replace
+ * @buf: raw sysfs buffer to parse
+ *
+ * Enforce that uuids can only be changed while the device is disabled
+ * (driver detached)
+ * LOCKING: expects device_lock() is held on entry
+ */
+int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
+		size_t len)
+{
+	u8 uuid[16];
+	int rc;
+
+	if (dev->driver)
+		return -EBUSY;
+
+	rc = nd_uuid_parse(dev, uuid, buf, len);
+	if (rc)
+		return rc;
+
+	kfree(*uuid_out);
+	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+	if (!(*uuid_out))
+		return -ENOMEM;
+
+	return 0;
+}
+
 static ssize_t commands_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {

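Aside, not part of the diff above: a sketch of how a namespace 'uuid'
sysfs store method might sit on top of the nd_uuid_store() helper just
added. The lock ordering and the to_nd_namespace_pmem() lookup are
assumptions for illustration; the actual consumer lives in the large
namespace file of this patch whose diff is suppressed further below.

/*
 * Illustrative only: wiring nd_uuid_store() into a device attribute.
 * nd_uuid_store() expects device_lock() to be held and returns -EBUSY
 * while a driver is attached to the namespace device.
 */
#include <linux/device.h>
#include <linux/nd.h>
#include "nd.h"	/* drivers/nvdimm private header */

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nd_uuid_store(dev, &nspm->uuid, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}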
@@ -21,18 +21,6 @@
 #include "label.h"
 #include "nd.h"
 
-static void free_data(struct nvdimm_drvdata *ndd)
-{
-	if (!ndd)
-		return;
-
-	if (ndd->data && is_vmalloc_addr(ndd->data))
-		vfree(ndd->data);
-	else
-		kfree(ndd->data);
-	kfree(ndd);
-}
-
 static int nvdimm_probe(struct device *dev)
 {
 	struct nvdimm_drvdata *ndd;
@@ -49,6 +37,8 @@ static int nvdimm_probe(struct device *dev)
 	ndd->dpa.start = 0;
 	ndd->dpa.end = -1;
 	ndd->dev = dev;
+	get_device(dev);
+	kref_init(&ndd->kref);
 
 	rc = nvdimm_init_nsarea(ndd);
 	if (rc)
@@ -74,21 +64,18 @@ static int nvdimm_probe(struct device *dev)
 	return 0;
 
  err:
-	free_data(ndd);
+	put_ndd(ndd);
 	return rc;
 }
 
 static int nvdimm_remove(struct device *dev)
 {
 	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
-	struct resource *res, *_r;
 
 	nvdimm_bus_lock(dev);
 	dev_set_drvdata(dev, NULL);
-	for_each_dpa_resource_safe(ndd, res, _r)
-		nvdimm_free_dpa(ndd, res);
 	nvdimm_bus_unlock(dev);
-	free_data(ndd);
+	put_ndd(ndd);
 
 	return 0;
 }

@@ -159,6 +159,48 @@ struct nvdimm *to_nvdimm(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(to_nvdimm);
 
+struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
+{
+	struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
+
+	return dev_get_drvdata(&nvdimm->dev);
+}
+EXPORT_SYMBOL(to_ndd);
+
+void nvdimm_drvdata_release(struct kref *kref)
+{
+	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
+	struct device *dev = ndd->dev;
+	struct resource *res, *_r;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	nvdimm_bus_lock(dev);
+	for_each_dpa_resource_safe(ndd, res, _r)
+		nvdimm_free_dpa(ndd, res);
+	nvdimm_bus_unlock(dev);
+
+	if (ndd->data && is_vmalloc_addr(ndd->data))
+		vfree(ndd->data);
+	else
+		kfree(ndd->data);
+	kfree(ndd);
+	put_device(dev);
+}
+
+void get_ndd(struct nvdimm_drvdata *ndd)
+{
+	kref_get(&ndd->kref);
+}
+
+void put_ndd(struct nvdimm_drvdata *ndd)
+{
+	if (ndd)
+		kref_put(&ndd->kref, nvdimm_drvdata_release);
+}
+
 const char *nvdimm_name(struct nvdimm *nvdimm)
 {
 	return dev_name(&nvdimm->dev);
@@ -247,6 +289,83 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
 }
 EXPORT_SYMBOL_GPL(nvdimm_create);
 
+/**
+ * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
+ * @nd_mapping: container of dpa-resource-root + labels
+ * @nd_region: constrain available space check to this reference region
+ * @overlap: calculate available space assuming this level of overlap
+ *
+ * Validate that a PMEM label, if present, aligns with the start of an
+ * interleave set and truncate the available size at the lowest BLK
+ * overlap point.
+ *
+ * The expectation is that this routine is called multiple times as it
+ * probes for the largest BLK encroachment for any single member DIMM of
+ * the interleave set.  Once that value is determined the PMEM-limit for
+ * the set can be established.
+ */
+resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
+		struct nd_mapping *nd_mapping, resource_size_t *overlap)
+{
+	resource_size_t map_start, map_end, busy = 0, available, blk_start;
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+	struct resource *res;
+	const char *reason;
+
+	if (!ndd)
+		return 0;
+
+	map_start = nd_mapping->start;
+	map_end = map_start + nd_mapping->size - 1;
+	blk_start = max(map_start, map_end + 1 - *overlap);
+	for_each_dpa_resource(ndd, res)
+		if (res->start >= map_start && res->start < map_end) {
+			if (strncmp(res->name, "blk", 3) == 0)
+				blk_start = min(blk_start, res->start);
+			else if (res->start != map_start) {
+				reason = "misaligned to iset";
+				goto err;
+			} else {
+				if (busy) {
+					reason = "duplicate overlapping PMEM reservations?";
+					goto err;
+				}
+				busy += resource_size(res);
+				continue;
+			}
+		} else if (res->end >= map_start && res->end <= map_end) {
+			if (strncmp(res->name, "blk", 3) == 0) {
+				/*
+				 * If a BLK allocation overlaps the start of
+				 * PMEM the entire interleave set may now only
+				 * be used for BLK.
+				 */
+				blk_start = map_start;
+			} else {
+				reason = "misaligned to iset";
+				goto err;
+			}
+		} else if (map_start > res->start && map_start < res->end) {
+			/* total eclipse of the mapping */
+			busy += nd_mapping->size;
+			blk_start = map_start;
+		}
+
+	*overlap = map_end + 1 - blk_start;
+	available = blk_start - map_start;
+	if (busy < available)
+		return available - busy;
+	return 0;
+
+ err:
+	/*
+	 * Something is wrong, PMEM must align with the start of the
+	 * interleave set, and there can only be one allocation per set.
+	 */
+	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
+	return 0;
+}
+
 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
 {
 	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
@@ -271,6 +390,24 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
 	return res;
 }
 
+/**
+ * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
+ * @nvdimm: container of dpa-resource-root + labels
+ * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
+ */
+resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
+		struct nd_label_id *label_id)
+{
+	resource_size_t allocated = 0;
+	struct resource *res;
+
+	for_each_dpa_resource(ndd, res)
+		if (strcmp(res->name, label_id->id) == 0)
+			allocated += resource_size(res);
+
+	return allocated;
+}
+
 static int count_dimms(struct device *dev, void *c)
 {
 	int *count = c;

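A quick worked example of the accounting in nd_pmem_available_dpa() above,
with invented numbers: suppose a DIMM contributes a 4 GiB mapping to the
interleave set, a BLK allocation already occupies the top 1 GiB of that
mapping, and a 512 MiB PMEM reservation sits at the start of the set.
Then blk_start lands 1 GiB below the end of the mapping, *overlap is
reported as 1 GiB, available is 4 GiB - 1 GiB = 3 GiB, and the returned
free space is 3 GiB - 512 MiB = 2.5 GiB.  nd_region_available_dpa(),
added later in this patch, re-runs this per-DIMM check with the largest
overlap seen so far, so every member DIMM of the set ends up truncated to
the same PMEM limit.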
@@ -230,7 +230,7 @@ static bool preamble_current(struct nvdimm_drvdata *ndd,
 	return true;
 }
 
-static char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
+char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
 {
 	if (!label_id || !uuid)
 		return NULL;
@@ -288,3 +288,56 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 
 	return 0;
 }
+
+int nd_label_active_count(struct nvdimm_drvdata *ndd)
+{
+	struct nd_namespace_index *nsindex;
+	unsigned long *free;
+	u32 nslot, slot;
+	int count = 0;
+
+	if (!preamble_current(ndd, &nsindex, &free, &nslot))
+		return 0;
+
+	for_each_clear_bit_le(slot, free, nslot) {
+		struct nd_namespace_label *nd_label;
+
+		nd_label = nd_label_base(ndd) + slot;
+
+		if (!slot_valid(nd_label, slot)) {
+			u32 label_slot = __le32_to_cpu(nd_label->slot);
+			u64 size = __le64_to_cpu(nd_label->rawsize);
+			u64 dpa = __le64_to_cpu(nd_label->dpa);
+
+			dev_dbg(ndd->dev,
+				"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
+					__func__, slot, label_slot, dpa, size);
+			continue;
+		}
+		count++;
+	}
+	return count;
+}
+
+struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
+{
+	struct nd_namespace_index *nsindex;
+	unsigned long *free;
+	u32 nslot, slot;
+
+	if (!preamble_current(ndd, &nsindex, &free, &nslot))
+		return NULL;
+
+	for_each_clear_bit_le(slot, free, nslot) {
+		struct nd_namespace_label *nd_label;
+
+		nd_label = nd_label_base(ndd) + slot;
+		if (!slot_valid(nd_label, slot))
+			continue;
+
+		if (n-- == 0)
+			return nd_label_base(ndd) + slot;
+	}
+
+	return NULL;
+}

@@ -125,4 +125,6 @@ int nd_label_validate(struct nvdimm_drvdata *ndd);
 void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
 		struct nd_namespace_index *src);
 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd);
+int nd_label_active_count(struct nvdimm_drvdata *ndd);
+struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
 #endif /* __LABEL_H__ */

[One file's diff suppressed because it is too large.]

@@ -56,4 +56,16 @@ int nvdimm_bus_register_dimms(struct nvdimm_bus *nvdimm_bus);
 int nvdimm_bus_register_regions(struct nvdimm_bus *nvdimm_bus);
 int nvdimm_bus_init_interleave_sets(struct nvdimm_bus *nvdimm_bus);
 int nd_match_dimm(struct device *dev, void *data);
+struct nd_label_id;
+char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
+bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
+struct nd_region;
+struct nvdimm_drvdata;
+struct nd_mapping;
+resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
+		struct nd_mapping *nd_mapping, resource_size_t *overlap);
+resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
+		struct nd_label_id *label_id);
+void get_ndd(struct nvdimm_drvdata *ndd);
 #endif /* __ND_CORE_H__ */

@@ -16,6 +16,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/ndctl.h>
+#include <linux/types.h>
 #include "label.h"
 
 struct nvdimm_drvdata {
@@ -25,6 +26,7 @@ struct nvdimm_drvdata {
 	void *data;
 	int ns_current, ns_next;
 	struct resource dpa;
+	struct kref kref;
 };
 
 struct nd_region_namespaces {
@@ -59,12 +61,19 @@ static inline struct nd_namespace_index *to_next_namespace_index(
 		(unsigned long long) (res ? resource_size(res) : 0), \
 		(unsigned long long) (res ? res->start : 0), ##arg)
 
+#define for_each_label(l, label, labels) \
+	for (l = 0; (label = labels ? labels[l] : NULL); l++)
+
+#define for_each_dpa_resource(ndd, res) \
+	for (res = (ndd)->dpa.child; res; res = res->sibling)
+
 #define for_each_dpa_resource_safe(ndd, res, next) \
 	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
 			res; res = next, next = next ? next->sibling : NULL)
 
 struct nd_region {
 	struct device dev;
+	struct device *ns_seed;
 	u16 ndr_mappings;
 	u64 ndr_size;
 	u64 ndr_start;
@@ -88,20 +97,28 @@ enum nd_async_mode {
 	ND_ASYNC,
 };
 
+void wait_nvdimm_bus_probe_idle(struct device *dev);
 void nd_device_register(struct device *dev);
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
+int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
+		size_t len);
 int __init nvdimm_init(void);
 int __init nd_region_init(void);
 void nvdimm_exit(void);
 void nd_region_exit(void);
+struct nvdimm;
+struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
 struct nd_region *to_nd_region(struct device *dev);
 int nd_region_to_nstype(struct nd_region *nd_region);
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region);
 void nvdimm_bus_lock(struct device *dev);
 void nvdimm_bus_unlock(struct device *dev);
 bool is_nvdimm_bus_locked(struct device *dev);
+void nvdimm_drvdata_release(struct kref *kref);
+void put_ndd(struct nvdimm_drvdata *ndd);
 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
 struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,

@@ -203,6 +203,23 @@ static int nd_pmem_probe(struct device *dev)
 	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 	struct pmem_device *pmem;
 
+	if (resource_size(&nsio->res) < ND_MIN_NAMESPACE_SIZE) {
+		resource_size_t size = resource_size(&nsio->res);
+
+		dev_dbg(dev, "%s: size: %pa, too small must be at least %#x\n",
+				__func__, &size, ND_MIN_NAMESPACE_SIZE);
+		return -ENODEV;
+	}
+
+	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_PMEM) {
+		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+		if (!nspm->uuid) {
+			dev_dbg(dev, "%s: uuid not set\n", __func__);
+			return -ENODEV;
+		}
+	}
+
 	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
 	if (IS_ERR(pmem))
 		return PTR_ERR(pmem);
@@ -222,13 +239,14 @@ static int nd_pmem_remove(struct device *dev)
 
 MODULE_ALIAS("pmem");
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
 static struct nd_device_driver nd_pmem_driver = {
 	.probe = nd_pmem_probe,
 	.remove = nd_pmem_remove,
 	.drv = {
 		.name = "nd_pmem",
 	},
-	.type = ND_DRIVER_NAMESPACE_IO,
+	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
 };
 
 static int __init pmem_init(void)

@@ -61,8 +61,11 @@ static int child_unregister(struct device *dev, void *data)
 
 static int nd_region_remove(struct device *dev)
 {
+	struct nd_region *nd_region = to_nd_region(dev);
+
 	/* flush attribute readers and disable */
 	nvdimm_bus_lock(dev);
+	nd_region->ns_seed = NULL;
 	dev_set_drvdata(dev, NULL);
 	nvdimm_bus_unlock(dev);
 

@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <linux/io.h>
+#include <linux/nd.h>
 #include "nd-core.h"
 #include "nd.h"
 
@@ -99,6 +100,58 @@ int nd_region_to_nstype(struct nd_region *nd_region)
 
 	return 0;
 }
+EXPORT_SYMBOL(nd_region_to_nstype);
+
+static int is_uuid_busy(struct device *dev, void *data)
+{
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+	u8 *uuid = data;
+
+	switch (nd_region_to_nstype(nd_region)) {
+	case ND_DEVICE_NAMESPACE_PMEM: {
+		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+		if (!nspm->uuid)
+			break;
+		if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
+			return -EBUSY;
+		break;
+	}
+	case ND_DEVICE_NAMESPACE_BLK: {
+		/* TODO: blk namespace support */
+		break;
+	}
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int is_namespace_uuid_busy(struct device *dev, void *data)
+{
+	if (is_nd_pmem(dev) || is_nd_blk(dev))
+		return device_for_each_child(dev, data, is_uuid_busy);
+	return 0;
+}
+
+/**
+ * nd_is_uuid_unique - verify that no other namespace has @uuid
+ * @dev: any device on a nvdimm_bus
+ * @uuid: uuid to check
+ */
+bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
+{
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+
+	if (!nvdimm_bus)
+		return false;
+	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
+	if (device_for_each_child(&nvdimm_bus->dev, uuid,
+				is_namespace_uuid_busy) != 0)
+		return false;
+	return true;
+}
 
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -151,6 +204,60 @@ static ssize_t set_cookie_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(set_cookie);
 
+resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
+{
+	resource_size_t blk_max_overlap = 0, available, overlap;
+	int i;
+
+	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+
+ retry:
+	available = 0;
+	overlap = blk_max_overlap;
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+
+		/* if a dimm is disabled the available capacity is zero */
+		if (!ndd)
+			return 0;
+
+		if (is_nd_pmem(&nd_region->dev)) {
+			available += nd_pmem_available_dpa(nd_region,
+					nd_mapping, &overlap);
+			if (overlap > blk_max_overlap) {
+				blk_max_overlap = overlap;
+				goto retry;
+			}
+		} else if (is_nd_blk(&nd_region->dev)) {
+			/* TODO: BLK Namespace support */
+		}
+	}
+
+	return available;
+}
+
+static ssize_t available_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	unsigned long long available = 0;
+
+	/*
+	 * Flush in-flight updates and grab a snapshot of the available
+	 * size.  Of course, this value is potentially invalidated the
+	 * memory nvdimm_bus_lock() is dropped, but that's userspace's
+	 * problem to not race itself.
+	 */
+	nvdimm_bus_lock(dev);
+	wait_nvdimm_bus_probe_idle(dev);
+	available = nd_region_available_dpa(nd_region);
+	nvdimm_bus_unlock(dev);
+
+	return sprintf(buf, "%llu\n", available);
+}
+static DEVICE_ATTR_RO(available_size);
+
 static ssize_t init_namespaces_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -168,11 +275,29 @@ static ssize_t init_namespaces_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(init_namespaces);
 
+static ssize_t namespace_seed_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	ssize_t rc;
+
+	nvdimm_bus_lock(dev);
+	if (nd_region->ns_seed)
+		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
+	else
+		rc = sprintf(buf, "\n");
+	nvdimm_bus_unlock(dev);
+	return rc;
+}
+static DEVICE_ATTR_RO(namespace_seed);
+
 static struct attribute *nd_region_attributes[] = {
 	&dev_attr_size.attr,
 	&dev_attr_nstype.attr,
 	&dev_attr_mappings.attr,
 	&dev_attr_set_cookie.attr,
+	&dev_attr_available_size.attr,
+	&dev_attr_namespace_seed.attr,
 	&dev_attr_init_namespaces.attr,
 	NULL,
 };
@@ -182,12 +307,18 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	struct device *dev = container_of(kobj, typeof(*dev), kobj);
 	struct nd_region *nd_region = to_nd_region(dev);
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
+	int type = nd_region_to_nstype(nd_region);
 
-	if (a != &dev_attr_set_cookie.attr)
+	if (a != &dev_attr_set_cookie.attr
+			&& a != &dev_attr_available_size.attr)
 		return a->mode;
 
-	if (is_nd_pmem(dev) && nd_set)
-			return a->mode;
+	if ((type == ND_DEVICE_NAMESPACE_PMEM
+				|| type == ND_DEVICE_NAMESPACE_BLK)
+			&& a == &dev_attr_available_size.attr)
+		return a->mode;
+	else if (is_nd_pmem(dev) && nd_set)
+		return a->mode;
 
 	return 0;
 }
@@ -198,6 +329,15 @@ struct attribute_group nd_region_attribute_group = {
 };
 EXPORT_SYMBOL_GPL(nd_region_attribute_group);
 
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
+{
+	struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+	if (nd_set)
+		return nd_set->cookie;
+	return 0;
+}
+
 /*
  * Upon successful probe/remove, take/release a reference on the
  * associated interleave set (if present)
@@ -205,18 +345,20 @@ EXPORT_SYMBOL_GPL(nd_region_attribute_group);
 static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 		struct device *dev, bool probe)
 {
-	if (is_nd_pmem(dev) || is_nd_blk(dev)) {
+	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
 		struct nd_region *nd_region = to_nd_region(dev);
 		int i;
 
 		for (i = 0; i < nd_region->ndr_mappings; i++) {
 			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
 			struct nvdimm *nvdimm = nd_mapping->nvdimm;
 
-			if (probe)
-				atomic_inc(&nvdimm->busy);
-			else
-				atomic_dec(&nvdimm->busy);
+			kfree(nd_mapping->labels);
+			nd_mapping->labels = NULL;
+			put_ndd(ndd);
+			nd_mapping->ndd = NULL;
+			atomic_dec(&nvdimm->busy);
 		}
 	}
 }

@@ -41,10 +41,20 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 		unsigned int buf_len);
 
+struct nd_namespace_label;
+struct nvdimm_drvdata;
 struct nd_mapping {
 	struct nvdimm *nvdimm;
+	struct nd_namespace_label **labels;
 	u64 start;
 	u64 size;
+	/*
+	 * @ndd is for private use at region enable / disable time for
+	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
+	 * conversions use to_ndd() which respects enabled state of the
+	 * nvdimm.
+	 */
+	struct nvdimm_drvdata *ndd;
 };
 
 struct nvdimm_bus_descriptor {

@@ -28,16 +28,40 @@ static inline struct nd_device_driver *to_nd_device_driver(
 	return container_of(drv, struct nd_device_driver, drv);
 };
 
+/**
+ * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * @dev: namespace device created by the nd region driver
+ * @res: struct resource conversion of a NFIT SPA table
+ */
 struct nd_namespace_io {
 	struct device dev;
 	struct resource res;
 };
 
+/**
+ * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
+ * @nsio: device and system physical address range to drive
+ * @alt_name: namespace name supplied in the dimm label
+ * @uuid: namespace name supplied in the dimm label
+ */
+struct nd_namespace_pmem {
+	struct nd_namespace_io nsio;
+	char *alt_name;
+	u8 *uuid;
+};
+
 static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
 {
 	return container_of(dev, struct nd_namespace_io, dev);
 }
 
+static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
+{
+	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
+
+	return container_of(nsio, struct nd_namespace_pmem, nsio);
+}
+
 #define MODULE_ALIAS_ND_DEVICE(type) \
 	MODULE_ALIAS("nd:t" __stringify(type) "*")
 #define ND_DEVICE_MODALIAS_FMT "nd:t%d"

@@ -190,4 +190,8 @@ enum nd_driver_flags {
 	ND_DRIVER_NAMESPACE_PMEM  = 1 << ND_DEVICE_NAMESPACE_PMEM,
 	ND_DRIVER_NAMESPACE_BLK   = 1 << ND_DEVICE_NAMESPACE_BLK,
 };
+
+enum {
+	ND_MIN_NAMESPACE_SIZE = 0x00400000,
+};
 #endif /* __NDCTL_H__ */
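For reference, the new ND_MIN_NAMESPACE_SIZE value above, 0x00400000, is
4 MiB (4 * 1024 * 1024 bytes); it is the floor that the nd_pmem probe
hunk earlier in this patch enforces before attaching to a namespace.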