mm: Provide a means of invalidation without using launder_folio
Implement a replacement for launder_folio.  The key feature of
invalidate_inode_pages2() is that it locks each folio individually, unmaps
it to prevent mmap'd accesses interfering and calls the ->launder_folio()
address_space op to flush it.  This has problems: firstly, each folio is
written individually as one or more small writes; secondly, adjacent folios
cannot be added so easily into the laundry; thirdly, it's yet another op to
implement.

Instead, use the invalidate lock to cause anyone wanting to add a folio to
the inode to wait, then unmap all the folios if we have mmaps, then,
conditionally, use ->writepages() to flush any dirty data back and then
discard all pages.

The invalidate lock prevents ->read_iter(), ->write_iter() and faulting
through mmap all from adding pages for the duration.

This is then used from netfslib to handle the flushing in unbuffered and
direct writes.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: Miklos Szeredi <miklos@szeredi.hu>
cc: Trond Myklebust <trond.myklebust@hammerspace.com>
cc: Christoph Hellwig <hch@lst.de>
cc: Andrew Morton <akpm@linux-foundation.org>
cc: Alexander Viro <viro@zeniv.linux.org.uk>
cc: Christian Brauner <brauner@kernel.org>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-mm@kvack.org
cc: linux-fsdevel@vger.kernel.org
cc: netfs@lists.linux.dev
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cifs@vger.kernel.org
cc: linux-nfs@vger.kernel.org
cc: devel@lists.orangefs.org
commit 74e797d79c
parent 120b878158

3 changed files with 80 additions and 4 deletions
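
To make the intent of the new helper concrete before the diffs, here is a
minimal, hypothetical caller-side sketch (example_flush_for_dio and its error
handling are illustrative, not part of the patch): a filesystem that
previously looped over invalidate_inode_pages2_range() and ->launder_folio()
can instead make one call that writes back and discards the whole range under
the invalidate lock.

/* Hypothetical caller sketch, not part of the patch: flush and discard
 * the pagecache over a byte range before a direct I/O write.
 */
static int example_flush_for_dio(struct inode *inode, loff_t pos, size_t len)
{
	/* flush=true: write dirty folios back before discarding them.
	 * The range is inclusive of its last byte.
	 */
	return filemap_invalidate_inode(inode, true, pos, pos + len - 1);
}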
fs/netfs/direct_write.c
@@ -132,12 +132,14 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
 	struct netfs_inode *ictx = netfs_inode(inode);
-	unsigned long long end;
 	ssize_t ret;
+	loff_t pos = iocb->ki_pos;
+	unsigned long long end = pos + iov_iter_count(from) - 1;
 
-	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+	_enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
 
 	if (!iov_iter_count(from))
 		return 0;
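
Note the convention change in the hunk above: end is now initialised to the
inclusive offset of the last byte to be written (pos + iov_iter_count(from)
- 1), which is the form filemap_range_has_page() and the new
filemap_invalidate_inode() take. A small worked example with hypothetical
values, assuming 4KiB pages:

/* Hypothetical values: a 2-byte write starting at the last byte of
 * page 0 (4KiB pages).
 */
loff_t pos = 4095;
size_t count = 2;
unsigned long long end = pos + count - 1;	/* 4096: last byte, inclusive */

/* [pos, end] touches page indices 0 and 1, so both pages are
 * considered by filemap_range_has_page(mapping, pos, end).
 */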
@@ -157,7 +159,25 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	ret = file_update_time(file);
 	if (ret < 0)
 		goto out;
-	ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		/* We could block if there are any pages in the range. */
+		ret = -EAGAIN;
+		if (filemap_range_has_page(mapping, pos, end))
+			if (filemap_invalidate_inode(inode, true, pos, end))
+				goto out;
+	} else {
+		ret = filemap_write_and_wait_range(mapping, pos, end);
+		if (ret < 0)
+			goto out;
+	}
+
+	/*
+	 * After a write we want buffered reads to be sure to go to disk to get
+	 * the new data.  We invalidate clean cached pages from the region we're
+	 * about to write.  We do this *before* the write so that we can return
+	 * without clobbering -EIOCBQUEUED from ->direct_IO().
+	 */
+	ret = filemap_invalidate_inode(inode, true, pos, end);
 	if (ret < 0)
 		goto out;
 	end = iocb->ki_pos + iov_iter_count(from);
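
The IOCB_NOWAIT branch above means a non-blocking direct write can now fail
fast with -EAGAIN when the target range is still populated, rather than
blocking on writeback. A hedged userspace-level illustration (hypothetical
helper; fd assumed opened with O_DIRECT on a netfs filesystem):

#define _GNU_SOURCE
#include <errno.h>
#include <sys/uio.h>

/* Hypothetical illustration, not part of the patch: a non-blocking
 * direct write that may see EAGAIN if the written range still has
 * cached pages that cannot be dropped without blocking.
 */
static ssize_t try_nowait_write(int fd, const void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	ssize_t n = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);

	if (n < 0 && errno == EAGAIN)
		return -1;	/* retry from a context that may block */
	return n;
}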
include/linux/pagemap.h
@@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping);
 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
 		loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+			     loff_t start, loff_t end);
 
 static inline int filemap_fdatawait(struct address_space *mapping)
 {
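
The declaration above takes an inclusive byte range. Two hedged usage
sketches (illustrative only; assumes an inode with a populated mapping):

int err;

/* Write dirty folios back, then drop everything from offset 0 onwards. */
err = filemap_invalidate_inode(inode, true, 0, LLONG_MAX);

/* Drop the folios covering the first 1MiB without writing them back;
 * note that dirty data in the range is discarded when flush is false.
 */
err = filemap_invalidate_inode(inode, false, 0, (1024 * 1024) - 1);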
mm/filemap.c
@@ -4134,6 +4134,60 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
 }
 EXPORT_SYMBOL(filemap_release_folio);
 
+/**
+ * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
+ * @inode: The inode to flush
+ * @flush: Set to write back rather than simply invalidate.
+ * @start: First byte in range.
+ * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
+ *       onwards.
+ *
+ * Invalidate all the folios on an inode that contribute to the specified
+ * range, possibly writing them back first.  Whilst the operation is
+ * undertaken, the invalidate lock is held to prevent new folios from being
+ * installed.
+ */
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+			     loff_t start, loff_t end)
+{
+	struct address_space *mapping = inode->i_mapping;
+	pgoff_t first = start >> PAGE_SHIFT;
+	pgoff_t last = end >> PAGE_SHIFT;
+	pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;
+
+	if (!mapping || !mapping->nrpages || end < start)
+		goto out;
+
+	/* Prevent new folios from being added to the inode. */
+	filemap_invalidate_lock(mapping);
+
+	if (!mapping->nrpages)
+		goto unlock;
+
+	unmap_mapping_pages(mapping, first, nr, false);
+
+	/* Write back the data if we're asked to. */
+	if (flush) {
+		struct writeback_control wbc = {
+			.sync_mode	= WB_SYNC_ALL,
+			.nr_to_write	= LONG_MAX,
+			.range_start	= start,
+			.range_end	= end,
+		};
+
+		filemap_fdatawrite_wbc(mapping, &wbc);
+	}
+
+	/* Wait for writeback to complete on all folios and discard. */
+	truncate_inode_pages_range(mapping, start, end);
+
+unlock:
+	filemap_invalidate_unlock(mapping);
+out:
+	return filemap_check_errors(mapping);
+}
+EXPORT_SYMBOL_GPL(filemap_invalidate_inode);
+
 #ifdef CONFIG_CACHESTAT_SYSCALL
 /**
  * filemap_cachestat() - compute the page cache statistics of a mapping
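
The page-index arithmetic at the top of filemap_invalidate_inode() deserves a
worked example (hypothetical values, assuming PAGE_SHIFT == 12, i.e. 4KiB
pages):

/* Invalidate bytes 5000..20000 inclusive with 4KiB pages: */
loff_t start = 5000, end = 20000;
pgoff_t first = start >> PAGE_SHIFT;	/* 1: page covering bytes 4096..8191 */
pgoff_t last = end >> PAGE_SHIFT;	/* 4: page covering bytes 16384..20479 */
pgoff_t nr = last - first + 1;		/* 4 pages passed to unmap_mapping_pages() */

/* end == LLONG_MAX is special-cased to nr = ULONG_MAX so that
 * unmap_mapping_pages() covers everything from first onwards.
 */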