	page cache: Convert delete_batch to XArray
Rename the function from page_cache_tree_delete_batch to just
page_cache_delete_batch.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
commit ef8e5717db
parent a332125fc3

1 changed file with 13 additions and 15 deletions
 mm/filemap.c | 28 +++++++++++++---------------
@@ -272,7 +272,7 @@ void delete_from_page_cache(struct page *page)
 EXPORT_SYMBOL(delete_from_page_cache);
 
 /*
- * page_cache_tree_delete_batch - delete several pages from page cache
+ * page_cache_delete_batch - delete several pages from page cache
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
@@ -285,23 +285,18 @@ EXPORT_SYMBOL(delete_from_page_cache);
  *
  * The function expects the i_pages lock to be held.
  */
-static void
-page_cache_tree_delete_batch(struct address_space *mapping,
+static void page_cache_delete_batch(struct address_space *mapping,
 			     struct pagevec *pvec)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
 	int total_pages = 0;
 	int i = 0, tail_pages = 0;
 	struct page *page;
-	pgoff_t start;
 
-	start = pvec->pages[0]->index;
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
+	mapping_set_update(&xas, mapping);
+	xas_for_each(&xas, page, ULONG_MAX) {
 		if (i >= pagevec_count(pvec) && !tail_pages)
 			break;
-		page = radix_tree_deref_slot_protected(slot,
-						       &mapping->i_pages.xa_lock);
 		if (xa_is_value(page))
 			continue;
 		if (!tail_pages) {
@@ -310,8 +305,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 			 * have our pages locked so they are protected from
 			 * being removed.
 			 */
-			if (page != pvec->pages[i])
+			if (page != pvec->pages[i]) {
+				VM_BUG_ON_PAGE(page->index >
+						pvec->pages[i]->index, page);
 				continue;
+			}
 			WARN_ON_ONCE(!PageLocked(page));
 			if (PageTransHuge(page) && !PageHuge(page))
 				tail_pages = HPAGE_PMD_NR - 1;
@@ -322,11 +320,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 			 */
 			i++;
 		} else {
+			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+					!= pvec->pages[i]->index, page);
 			tail_pages--;
 		}
-		radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
-		__radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
-				workingset_lookup_update(mapping));
+		xas_store(&xas, NULL);
 		total_pages++;
 	}
 	mapping->nrpages -= total_pages;
@@ -347,7 +345,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 
 		unaccount_page_cache_page(mapping, pvec->pages[i]);
 	}
-	page_cache_tree_delete_batch(mapping, pvec);
+	page_cache_delete_batch(mapping, pvec);
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 
 	for (i = 0; i < pagevec_count(pvec); i++)
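For readers less familiar with the XArray advanced API, here is a minimal
sketch of the walk-and-erase idiom this patch adopts: an XA_STATE cursor,
xas_for_each() iteration, and xas_store(&xas, NULL) to erase at the cursor
without restarting the walk. It is illustrative only and not part of the
patch: the function name erase_range and its parameters are made up, and
unlike page_cache_delete_batch(), which runs with the caller already
holding the i_pages lock, this sketch takes the xa_lock itself.

#include <linux/xarray.h>

/*
 * Illustrative sketch (not from the patch): erase every pointer entry
 * in [first, last], skipping value entries, in a single locked walk.
 */
static void erase_range(struct xarray *xa, unsigned long first,
			unsigned long last)
{
	XA_STATE(xas, xa, first);	/* cursor positioned at 'first' */
	void *entry;

	xas_lock(&xas);			/* the patch instead relies on the caller's i_pages lock */
	xas_for_each(&xas, entry, last) {
		if (xa_is_value(entry))	/* value entries (e.g. shadows) are not pages */
			continue;
		xas_store(&xas, NULL);	/* erase at the cursor; iteration continues */
	}
	xas_unlock(&xas);
}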