Mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)
Commit a416637f90
Since we can no longer assume all data filemap folios are page sized, use proper folio_size() calls to determine the folio size, as a preparation for future large data filemap folios.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
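As a rough illustration of that change (the helper below is hypothetical, not code from this commit): per-folio calculations should derive the size from folio_size() rather than assuming PAGE_SIZE, because a large data folio can span several pages.

static inline unsigned int demo_blocks_per_folio(const struct btrfs_fs_info *fs_info,
						 struct folio *folio)
{
	/* folio_size() equals PAGE_SIZE only for order-0 (single page) folios. */
	return folio_size(folio) >> fs_info->sectorsize_bits;
}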
202 lines · 7.2 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
#include "btrfs_inode.h"
#include "fs.h"

struct address_space;
struct folio;

/*
 * Extra info for subpage bitmap.
 *
 * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
 * one larger bitmap.
 *
 * This structure records how they are organized in the bitmap:
 *
 * /- uptodate          /- dirty        /- ordered
 * |			|		|
 * v			v		v
 * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
 * |< sectors_per_page >|
 *
 * Unlike regular macro-like enums, here we do not use upper-case names, as
 * these names will be utilized in various macros to define function names.
 */
enum {
	btrfs_bitmap_nr_uptodate = 0,
	btrfs_bitmap_nr_dirty,
	btrfs_bitmap_nr_writeback,
	btrfs_bitmap_nr_ordered,
	btrfs_bitmap_nr_checked,
	btrfs_bitmap_nr_locked,
	btrfs_bitmap_nr_max
};

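/*
 * Sketch of the packed layout above (hypothetical helper, not part of the
 * kernel API): each status type from the enum owns a contiguous run of one
 * bit per block, so the bit tracking a given block for a given type sits at
 * bitmap_nr * blocks_per_folio + block_index.
 */
static inline unsigned int btrfs_demo_subpage_bit_nr(unsigned int bitmap_nr,
						     unsigned int blocks_per_folio,
						     unsigned int block_index)
{
	return bitmap_nr * blocks_per_folio + block_index;
}
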
/*
 * Structure to track the status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_subpage {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	union {
		/*
		 * Structures only used by metadata.
		 *
		 * @eb_refs should only be operated under private_lock, as it
		 * manages whether the subpage can be detached.
		 */
		atomic_t eb_refs;

		/*
		 * Structures only used by data.
		 *
		 * How many sectors inside the page are locked.
		 */
		atomic_t nr_locked;
	};
	unsigned long bitmaps[];
};

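/*
 * Sizing sketch (hypothetical helper, not the kernel implementation): the
 * flexible bitmaps[] array above must hold one bit per block for each of the
 * btrfs_bitmap_nr_max status types, rounded up to whole unsigned longs.
 */
static inline unsigned int btrfs_demo_subpage_bitmap_longs(unsigned int blocks_per_folio)
{
	return DIV_ROUND_UP(btrfs_bitmap_nr_max * blocks_per_folio, BITS_PER_LONG);
}
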
enum btrfs_subpage_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};

#if PAGE_SIZE > BTRFS_MIN_BLOCKSIZE
/*
 * Subpage support for metadata is more complex, as we can have dummy extent
 * buffers, where folios have no mapping to determine the owning inode.
 *
 * Thankfully we only need to check if node size is smaller than page size.
 * Even with larger folio support, we will only allocate a folio as large as
 * node size.
 * Thus if nodesize < PAGE_SIZE, we know metadata needs the subpage routines.
 */
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	return fs_info->nodesize < PAGE_SIZE;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
				    struct folio *folio)
{
	if (folio->mapping && folio->mapping->host)
		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
	return fs_info->sectorsize < folio_size(folio);
}
#else
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	return false;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
				    struct folio *folio)
{
	if (folio->mapping && folio->mapping->host)
		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
	return false;
}
#endif

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio,
			  enum btrfs_subpage_type type);

/* Allocate additional data where page represents more than one sector */
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
				size_t fsize, enum btrfs_subpage_type type);
void btrfs_free_subpage(struct btrfs_subpage *subpage);

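/*
 * Usage sketch (hypothetical call site, not kernel code): a data path that
 * needs sub-folio tracking attaches a subpage structure before doing block
 * sized state updates, and detaches it when the folio private data is torn
 * down:
 *
 *	ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 */
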
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);

void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap);
/*
 * Template for subpage related operations.
 *
 * btrfs_subpage_*() are for call sites where the folio has a subpage structure
 * attached and the range is ensured to be inside the folio's single page.
 *
 * btrfs_folio_*() are for call sites where the folio can be either subpage
 * specific or a regular folio. The functions will handle both cases.
 * But the range still needs to be inside one single page.
 *
 * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range doesn't
 * need to be inside the page. Those functions will truncate the range
 * automatically.
 *
 * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
 *
 * For metadata, one should use btrfs_meta_folio_*() helpers instead, and there
 * is no clamp version for metadata helpers, as we either go subpage
 * (nodesize < PAGE_SIZE) or go regular folio helpers (nodesize >= PAGE_SIZE,
 * and our folio is never larger than nodesize).
 */
#define DECLARE_BTRFS_SUBPAGE_OPS(name)					\
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
		struct folio *folio, u64 start, u32 len);		\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
		struct folio *folio, u64 start, u32 len);		\
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);

DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);

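/*
 * Usage sketch (hypothetical call sites, not kernel code): a data path whose
 * range may extend past the folio uses the clamp variant, while a metadata
 * path passes the owning extent buffer instead of an explicit range:
 *
 *	btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
 *	btrfs_meta_folio_set_dirty(folio, eb);
 */
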
/*
 * Helper for error cleanup, where a folio will have its dirty flag cleared,
 * with writeback started and finished.
 */
static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
					       struct folio *locked_folio,
					       u64 start, u32 len)
{
	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
}

bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len);

void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len);
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap);
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len);

#endif