	mm: Remove __delete_from_page_cache()
This wrapper is no longer used.  Remove it and all references to it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
commit 6ffcd825e7
parent fb5c2029f8

5 changed files with 4 additions and 8 deletions
fs/inode.c
@@ -604,7 +604,7 @@ void clear_inode(struct inode *inode)
 {
 	/*
 	 * We have to cycle the i_pages lock here because reclaim can be in the
-	 * process of removing the last page (in __delete_from_page_cache())
+	 * process of removing the last page (in __filemap_remove_folio())
 	 * and we must not free the mapping under it.
 	 */
 	xa_lock_irq(&inode->i_data.i_pages);
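The lock cycle this comment describes can be illustrated with a short sketch; the shape below is a simplified reading of clear_inode() and is not part of this patch. Taking and immediately dropping i_pages means any reclaimer still inside __filemap_remove_folio() must finish and release the lock before the mapping is torn down.

	/*
	 * Hedged sketch, simplified from clear_inode() (not from this
	 * patch): cycling the i_pages lock waits out a concurrent
	 * removal of the last page before the mapping can be freed.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);	/* all pages must already be gone */
	xa_unlock_irq(&inode->i_data.i_pages);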
include/linux/pagemap.h
@@ -1107,10 +1107,6 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 void filemap_remove_folio(struct folio *folio);
 void delete_from_page_cache(struct page *page);
 void __filemap_remove_folio(struct folio *folio, void *shadow);
-static inline void __delete_from_page_cache(struct page *page, void *shadow)
-{
-	__filemap_remove_folio(page_folio(page), shadow);
-}
 void replace_page_cache_page(struct page *old, struct page *new);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct folio_batch *fbatch);
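With the wrapper gone, any remaining caller open-codes the page-to-folio conversion itself. The sketch below is hypothetical (example_remove() is not a kernel function); it uses only page_folio() and the __filemap_remove_folio() declaration visible in this hunk, and mirrors the body of the deleted inline.

	/*
	 * Hypothetical call site after this commit; previously this
	 * would have been __delete_from_page_cache(page, shadow).
	 */
	static void example_remove(struct page *page, void *shadow)
	{
		__filemap_remove_folio(page_folio(page), shadow);
	}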
mm/memory-failure.c
@@ -1935,7 +1935,7 @@ int memory_failure(unsigned long pfn, int flags)
 
 	/*
 	 * Now take care of user space mappings.
-	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
+	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
 	 */
 	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
mm/shmem.c
@@ -392,7 +392,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	unsigned long flags;
 
-	/* nrpages adjustment done by __delete_from_page_cache() or caller */
+	/* nrpages adjustment done by __filemap_remove_folio() or caller */
 
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced -= pages;
mm/truncate.c
@@ -443,7 +443,7 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * mapping->invalidate_lock.
  *
  * Note: When this function returns, there can be a page in the process of
- * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
+ * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
  * mapping->nrpages can be non-zero when this function returns even after
  * truncation of the whole mapping.
  */