mirror of https://github.com/torvalds/linux.git
	dax: Check the end of the block-device capacity with dax_direct_access()
The checks in __bdev_dax_supported() helped mitigate a potential data
corruption bug in the pmem driver's handling of section alignment padding.
Strengthen the checks, including checking the end of the range, to validate
the dev_pagemap, Xarray entries, and sector-to-pfn translation established
for pmem namespaces.

Acked-by: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent fa7d2e639c
commit ad428cdb52

1 changed file with 28 additions and 10 deletions
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
 	struct dax_device *dax_dev;
 	bool dax_enabled = false;
+	pgoff_t pgoff, pgoff_end;
 	struct request_queue *q;
-	pgoff_t pgoff;
-	int err, id;
-	pfn_t pfn;
-	long len;
 	char buf[BDEVNAME_SIZE];
+	void *kaddr, *end_kaddr;
+	pfn_t pfn, end_pfn;
+	sector_t last_page;
+	long len, len2;
+	int err, id;
 
 	if (blocksize != PAGE_SIZE) {
 		pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
+	last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
+	if (err) {
+		pr_debug("%s: error: unaligned partition for dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 	if (!dax_dev) {
 		pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 	}
 
 	id = dax_read_lock();
-	len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
+	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 	dax_read_unlock(id);
 
 	put_dax(dax_dev);
 
-	if (len < 1) {
+	if (len < 1 || len2 < 1) {
 		pr_debug("%s: error: dax access failed (%ld)\n",
-				bdevname(bdev, buf), len);
+				bdevname(bdev, buf), len < 1 ? len : len2);
 		return false;
 	}
 
@@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		 */
 		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
 		dax_enabled = true;
-	} else if (pfn_t_devmap(pfn)) {
-		struct dev_pagemap *pgmap;
+	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
+		struct dev_pagemap *pgmap, *end_pgmap;
 
 		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
-		if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
+		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
+				&& pfn_t_to_page(pfn)->pgmap == pgmap
+				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
+				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
+				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
 			dax_enabled = true;
 		put_dev_pagemap(pgmap);
+		put_dev_pagemap(end_pgmap);
+
 	}
 
 	if (!dax_enabled) {
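
One non-obvious detail in the hunks above is the last_page computation:
bdev_dax_pgoff() takes a 512-byte sector number, so the device size in bytes
is first reduced to the page frame of its last byte (PFN_DOWN(i_size_read(...)
- 1)) and then scaled by 8, the number of 512-byte sectors in a 4K page. The
standalone sketch below is not kernel code; the helper name and constants are
illustrative and assume 4K pages and 512-byte sectors, with PFN_DOWN() modeled
as a plain right shift:

/*
 * Minimal sketch of:
 *   last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
 * i.e. "sector index of the first 512-byte sector of the last page".
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* assumption: 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SECTOR_SIZE	512UL
#define SECTORS_PER_PAGE (PAGE_SIZE / SECTOR_SIZE)	/* == 8 */

static uint64_t last_page_sector(uint64_t device_bytes)
{
	/* page frame number of the device's last byte (PFN_DOWN analogue) */
	uint64_t last_pfn = (device_bytes - 1) >> PAGE_SHIFT;

	/* convert pages to 512-byte sectors */
	return last_pfn * SECTORS_PER_PAGE;
}

int main(void)
{
	/* e.g. a 16 MiB namespace: its last page starts at byte 16M - 4K */
	uint64_t size = 16UL << 20;
	uint64_t sector = last_page_sector(size);

	printf("last page begins at sector %llu (byte offset %llu)\n",
	       (unsigned long long)sector,
	       (unsigned long long)(sector * SECTOR_SIZE));
	return 0;
}

With pgoff and pgoff_end in hand, the patch calls dax_direct_access() once for
the first and once for the last page, and only enables DAX when both lookups
succeed, resolve to the same MEMORY_DEVICE_FS_DAX dev_pagemap, and return pfns
that agree with the corresponding kernel addresses.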