forked from mirrors/linux
		
	netfs: Prep to use folio->private for write grouping and streaming write
Prepare to use folio->private to hold information about write grouping and
streaming write.  These are implemented in the same commit as they both
make use of folio->private and will be both checked at the same time in
several places.
"Write grouping" involves ordering the writeback of groups of writes, such
as is needed for ceph snaps.  A group is represented by a
filesystem-supplied object which must contain a netfs_group struct.  This
contains just a refcount and a pointer to a destructor.
"Streaming write" is the storage of data in folios that are marked dirty,
but not uptodate, to avoid unnecessary reads of data.  This is represented
by a netfs_folio struct.  This contains the offset and length of the
modified region plus the otherwise displaced write grouping pointer.
The way folio->private is multiplexed is:
 (1) If private is NULL then neither is in operation on a dirty folio.
 (2) If private is set, with bit 0 clear, then this points to a group.
 (3) If private is set, with bit 0 set, then this points to a netfs_folio
     struct (with bit 0 AND'ed out).
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
			
			
This commit is contained in:
		
							parent
							
								
									4fcccc38eb
								
							
						
					
					
						commit
						9ebff83e64
					
				
					 3 changed files with 115 additions and 0 deletions
				
			
		| 
						 | 
				
			
			@ -149,6 +149,34 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
 | 
			
		|||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 | 
			
		||||
 */
 | 
			
		||||
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
 | 
			
		||||
{
 | 
			
		||||
	if (netfs_group)
 | 
			
		||||
		refcount_inc(&netfs_group->ref);
 | 
			
		||||
	return netfs_group;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 | 
			
		||||
 */
 | 
			
		||||
static inline void netfs_put_group(struct netfs_group *netfs_group)
 | 
			
		||||
{
 | 
			
		||||
	if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
 | 
			
		||||
		netfs_group->free(netfs_group);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 | 
			
		||||
 */
 | 
			
		||||
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
 | 
			
		||||
{
 | 
			
		||||
	if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
 | 
			
		||||
		netfs_group->free(netfs_group);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * fscache-cache.c
 | 
			
		||||
 */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -177,9 +177,55 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
 | 
			
		|||
 */
 | 
			
		||||
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo = NULL;
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio_index(folio), offset, length);

	folio_wait_fscache(folio);

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write.
		 * The dirty data occupies [fstart, fend) within the folio and
		 * the invalidated span is [offset, end); whatever dirty data
		 * falls inside the invalidated span no longer needs writing,
		 * so trim the dirty region down to the surviving part.
		 */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int end = offset + length;

		if (offset >= fend)
			return;
		if (end <= fstart)
			return;
		if (offset <= fstart && end >= fend)
			goto erase_completely;
		if (offset <= fstart && end > fstart)
			goto move_start;	/* Invalidation covers the front */
		if (offset > fstart && end >= fend)
			goto reduce_len;	/* Invalidation covers the tail */
		/* A partial write was split.  The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
	return;
reduce_len:
	/* Keep the surviving front of the dirty region, [fstart, offset). */
	finfo->dirty_len = offset - finfo->dirty_offset;
	return;
move_start:
	/* Keep the surviving tail of the dirty region, [end, fend). */
	finfo->dirty_len -= offset + length - finfo->dirty_offset;
	finfo->dirty_offset = offset + length;
}
EXPORT_SYMBOL(netfs_invalidate_folio);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -140,6 +140,47 @@ struct netfs_inode {
 | 
			
		|||
#define NETFS_ICTX_ODIRECT	0		/* The file has DIO in progress */
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
/*
 * A netfs group - for instance a ceph snap.  This is marked on dirty pages and
 * pages marked with a group must be flushed before they can be written under
 * the domain of another group.
 */
struct netfs_group {
	refcount_t		ref;	/* Ref count; ->free() is called when it drops to 0 */
	void (*free)(struct netfs_group *netfs_group);	/* Filesystem-supplied destructor */
};
 | 
			
		||||
 | 
			
		||||
/*
 * Information about a dirty page (attached only if necessary).
 * folio->private
 *
 * Stored in folio->private with bit 0 (NETFS_FOLIO_INFO) set to distinguish
 * it from a bare netfs_group pointer, which is stored with bit 0 clear.
 */
struct netfs_folio {
	struct netfs_group	*netfs_group;	/* Filesystem's grouping marker (or NULL). */
	unsigned int		dirty_offset;	/* Write-streaming dirty data offset */
	unsigned int		dirty_len;	/* Write-streaming dirty data length */
};
#define NETFS_FOLIO_INFO	0x1UL	/* OR'd with folio->private. */
 | 
			
		||||
 | 
			
		||||
static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
 | 
			
		||||
{
 | 
			
		||||
	void *priv = folio_get_private(folio);
 | 
			
		||||
 | 
			
		||||
	if ((unsigned long)priv & NETFS_FOLIO_INFO)
 | 
			
		||||
		return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
 | 
			
		||||
	return NULL;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline struct netfs_group *netfs_folio_group(struct folio *folio)
 | 
			
		||||
{
 | 
			
		||||
	struct netfs_folio *finfo;
 | 
			
		||||
	void *priv = folio_get_private(folio);
 | 
			
		||||
 | 
			
		||||
	finfo = netfs_folio_info(folio);
 | 
			
		||||
	if (finfo)
 | 
			
		||||
		return finfo->netfs_group;
 | 
			
		||||
	return priv;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Resources required to do operations on a cache.
 | 
			
		||||
 */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue