dax: Convert dax_lock_mapping_entry to XArray

Instead of always retrying when we slept, only retry if the page has moved.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
commit 9f32d22130
parent 9fc747f68d

1 changed file with 35 additions and 48 deletions:

fs/dax.c | 81
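Before the diff itself, a note on the entry encoding the new helpers rely on. dax_make_entry() packs a pfn and flag bits into an XArray value entry, so the locking loop's "did the page move?" test reduces to comparing the packed pfn with the page's own. A minimal user-space sketch, assuming the DAX_* constants from fs/dax.c of this era and re-implementing xa_mk_value()/xa_to_value() locally so it runs standalone:

#include <assert.h>
#include <stdio.h>

/* Assumed from fs/dax.c of this era; not part of this diff. */
#define DAX_SHIFT	4
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)

/* Local stand-ins for the XArray value helpers: integers are shifted
 * up one bit and tagged with bit 0 so they can never be mistaken for
 * (aligned) pointers. */
static void *xa_mk_value(unsigned long v)
{
	return (void *)((v << 1) | 1);
}

static unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

static void *dax_make_entry(unsigned long pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn << DAX_SHIFT));
}

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x12345;
	void *entry = dax_make_entry(pfn, DAX_PMD | DAX_LOCKED);

	/* The "did the page move while we slept?" check in
	 * dax_lock_mapping_entry() is exactly this comparison. */
	assert(dax_to_pfn(entry) == pfn);
	printf("entry %p encodes pfn %#lx, flags %#lx\n",
	       entry, dax_to_pfn(entry),
	       xa_to_value(entry) & ((1UL << DAX_SHIFT) - 1));
	return 0;
}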
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -99,6 +99,17 @@ static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 			DAX_LOCKED);
 }
 
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+{
+	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+}
+
+static void *dax_make_page_entry(struct page *page)
+{
+	pfn_t pfn = page_to_pfn_t(page);
+	return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
+}
+
 static bool dax_is_locked(void *entry)
 {
 	return xa_to_value(entry) & DAX_LOCKED;
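The rewritten locking loop in the final hunk below waits on a locked entry via get_unlocked_entry(), which an earlier patch in this conversion series introduces; this commit only calls it. For context, a sketch of its likely shape, modeled on the wait-queue helpers already in fs/dax.c (wait_exceptional_entry_queue, dax_entry_waitqueue, wake_exceptional_entry_func); names and details here are assumptions, not part of this diff:

/* Sketch only: wait until the entry at xas's index is unlocked, then
 * return whatever is stored there now.  The xa_lock is dropped around
 * schedule(), which is why the caller must revalidate afterwards
 * (here, by comparing pfns). */
static void *get_unlocked_entry(struct xa_state *xas)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;
	void *entry;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_load(xas);
		if (!entry || !xa_is_value(entry) || !dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}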
@@ -487,33 +498,16 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
-	pgoff_t index;
-	struct inode *inode;
-	bool did_lock = false;
-	void *entry = NULL, **slot;
-	struct address_space *mapping;
+	XA_STATE(xas, NULL, 0);
+	void *entry;
 
-	rcu_read_lock();
 	for (;;) {
-		mapping = READ_ONCE(page->mapping);
+		struct address_space *mapping = READ_ONCE(page->mapping);
 
 		if (!dax_mapping(mapping))
-			break;
+			return false;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -522,47 +516,40 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		inode = mapping->host;
-		if (S_ISCHR(inode->i_mode)) {
-			did_lock = true;
-			break;
-		}
+		if (S_ISCHR(mapping->host->i_mode))
+			return true;
 
-		xa_lock_irq(&mapping->i_pages);
+		xas.xa = &mapping->i_pages;
+		xas_lock_irq(&xas);
 		if (mapping != page->mapping) {
-			xa_unlock_irq(&mapping->i_pages);
+			xas_unlock_irq(&xas);
 			continue;
 		}
-		index = page->index;
-
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
-		if (!entry) {
-			xa_unlock_irq(&mapping->i_pages);
-			break;
-		} else if (IS_ERR(entry)) {
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
-			continue;
-		}
-		lock_slot(mapping, slot);
-		did_lock = true;
-		xa_unlock_irq(&mapping->i_pages);
-		break;
+		xas_set(&xas, page->index);
+		entry = xas_load(&xas);
+		if (dax_is_locked(entry)) {
+			entry = get_unlocked_entry(&xas);
+			/* Did the page move while we slept? */
+			if (dax_to_pfn(entry) != page_to_pfn(page)) {
+				xas_unlock_irq(&xas);
+				continue;
+			}
+		}
+		dax_lock_entry(&xas, entry);
+		xas_unlock_irq(&xas);
+		return true;
 	}
-	rcu_read_unlock();
-
-	return did_lock;
 }
 
 void dax_unlock_mapping_entry(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
-	struct inode *inode = mapping->host;
+	XA_STATE(xas, &mapping->i_pages, page->index);
 
-	if (S_ISCHR(inode->i_mode))
+	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	unlock_mapping_entry(mapping, page->index);
+	dax_unlock_entry(&xas, dax_make_page_entry(page));
 }
 
 /*
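This lock/unlock pair was added for the memory-failure path, which needs to hold the DAX entry while it handles a poisoned pfn. A hedged sketch of the caller pattern, loosely after memory_failure_dev_pagemap() in mm/memory-failure.c; the function body here is illustrative, not the actual implementation:

static int memory_failure_sketch(struct page *page)
{
	int rc = -EBUSY;

	/*
	 * Pin the mapping entry so truncate cannot free the page out
	 * from under us while the poisoned pfn is handled.
	 */
	if (!dax_lock_mapping_entry(page))
		goto out;

	/* ... unmap users of the page and record the failure ... */
	rc = 0;

	dax_unlock_mapping_entry(page);
out:
	return rc;
}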