btrfs: pass a pointer to get_range_bits() to cache first search result

Allow get_range_bits() to take an extent state pointer-to-pointer argument
so that we can cache the first extent state record found in the target
range, allowing a caller to use it for subsequent operations without doing
a full tree search. Currently the only user is try_release_extent_state(),
which then does a call to __clear_extent_bit() which can use such a cached
state record.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Filipe Manana 2025-03-25 17:26:37 +00:00 committed by David Sterba
parent 32c523c578
commit c4669e4a8b
3 changed files with 26 additions and 9 deletions

View file

@ -1752,14 +1752,26 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32
return bitset;
}
void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits)
void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
struct extent_state **cached_state)
{
struct extent_state *state;
/*
* The cached state is currently mandatory and not used to start the
* search, only to cache the first state record found in the range.
*/
ASSERT(cached_state != NULL);
ASSERT(*cached_state == NULL);
*bits = 0;
spin_lock(&tree->lock);
state = tree_search(tree, start);
if (state && state->start < end) {
*cached_state = state;
refcount_inc(&state->refs);
}
while (state) {
if (state->start > end)
break;

View file

@ -170,7 +170,8 @@ void free_extent_state(struct extent_state *state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits);
void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
struct extent_state **cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,

View file

@ -2621,13 +2621,15 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
static bool try_release_extent_state(struct extent_io_tree *tree,
struct folio *folio)
{
struct extent_state *cached_state = NULL;
u64 start = folio_pos(folio);
u64 end = start + folio_size(folio) - 1;
u32 range_bits;
u32 clear_bits;
int ret;
bool ret = false;
int ret2;
get_range_bits(tree, start, end, &range_bits);
get_range_bits(tree, start, end, &range_bits, &cached_state);
/*
* We can release the folio if it's locked only for ordered extent
@ -2635,7 +2637,7 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
*/
if ((range_bits & EXTENT_LOCKED) &&
!(range_bits & EXTENT_FINISHING_ORDERED))
return false;
goto out;
clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW |
EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED |
@ -2645,15 +2647,17 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
* nodatasum, delalloc new and finishing ordered bits. The delalloc new
* bit will be cleared by ordered extent completion.
*/
ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
ret2 = __clear_extent_bit(tree, start, end, clear_bits, &cached_state, NULL);
/*
* If clear_extent_bit failed for enomem reasons, we can't allow the
* release to continue.
*/
if (ret < 0)
return false;
if (ret2 == 0)
ret = true;
out:
free_extent_state(cached_state);
return true;
return ret;
}
/*