mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 10:40:15 +02:00

	btrfs: allow btrfs_submit_bio to split bios
Currently the I/O submitters have to split bios according to the chunk
stripe boundaries.  This leads to extra lookups in the extent trees and
a lot of boilerplate code.

To drop this requirement, split the bio when __btrfs_map_block returns
a mapping that is smaller than the requested size and keep a count of
pending bios in the original btrfs_bio so that the upper level
completion is only invoked when all clones have completed.

Based on a patch from Qu Wenruo.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
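The completion scheme described above can be pictured with a small standalone
sketch (plain userspace C, not the kernel code): the original request starts
with a pending count of one for its own completion, every clone split off at a
chunk boundary takes an extra reference, and the upper-level end_io runs only
when the last reference is dropped.  The fake_request type, the fixed 64K chunk
size and the instant completions are invented purely for illustration.

/* Standalone sketch of the split-and-count completion scheme, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	unsigned long long start;	/* logical offset of the not yet submitted tail */
	unsigned long long len;		/* bytes not yet submitted */
	atomic_int pending;		/* one for the request itself + one per clone in flight */
	void (*end_io)(struct fake_request *req);
};

static void upper_end_io(struct fake_request *req)
{
	/* req->start has advanced to the end offset by the time this runs */
	printf("upper-level end_io: request finished at offset %llu\n", req->start);
}

/* Drop one reference; the last drop invokes the upper-level completion. */
static void chunk_done(struct fake_request *req)
{
	if (atomic_fetch_sub(&req->pending, 1) == 1)
		req->end_io(req);
}

/* Pretend the chunk layout limits every submission to a 64K boundary. */
static unsigned long long map_chunk(unsigned long long start, unsigned long long len)
{
	unsigned long long boundary = 64 * 1024 - (start % (64 * 1024));

	return len < boundary ? len : boundary;
}

/*
 * Submit the next chunk.  A chunk shorter than the remaining length is a split
 * clone and takes an extra reference; the final chunk stands for the original
 * request itself and consumes the initial reference when it completes.
 * Returns true once the whole request has been submitted.
 */
static bool submit_one_chunk(struct fake_request *req)
{
	unsigned long long map_len = map_chunk(req->start, req->len);

	if (map_len < req->len)
		atomic_fetch_add(&req->pending, 1);	/* a clone goes in flight */

	printf("submit chunk [%llu, +%llu)\n", req->start, map_len);
	chunk_done(req);	/* pretend the chunk completed immediately */

	req->start += map_len;
	req->len -= map_len;
	return req->len == 0;
}

int main(void)
{
	struct fake_request req = {
		.start = 48 * 1024,
		.len = 200 * 1024,
		.end_io = upper_end_io,
	};

	atomic_init(&req.pending, 1);	/* the original request's own completion */

	while (!submit_one_chunk(&req))
		;	/* mirrors the while (!btrfs_submit_chunk()) loop below */

	return 0;
}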
commit 852eee62d3
parent 542e300e83

2 changed files with 93 additions and 18 deletions

 fs/btrfs/bio.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++--------
 fs/btrfs/bio.h |   1 +

diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -17,6 +17,7 @@
 #include "file-item.h"
 
 static struct bio_set btrfs_bioset;
+static struct bio_set btrfs_clone_bioset;
 static struct bio_set btrfs_repair_bioset;
 static mempool_t btrfs_failed_bio_pool;
 
@@ -38,6 +39,7 @@ static inline void btrfs_bio_init(struct btrfs_bio *bbio,
 	bbio->inode = inode;
 	bbio->end_io = end_io;
 	bbio->private = private;
+	atomic_set(&bbio->pending_ios, 1);
 }
 
 /*
@@ -75,6 +77,59 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
 	return bio;
 }
 
+static struct bio *btrfs_split_bio(struct bio *orig, u64 map_length)
+{
+	struct btrfs_bio *orig_bbio = btrfs_bio(orig);
+	struct bio *bio;
+
+	bio = bio_split(orig, map_length >> SECTOR_SHIFT, GFP_NOFS,
+			&btrfs_clone_bioset);
+	btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode, NULL, orig_bbio);
+
+	btrfs_bio(bio)->file_offset = orig_bbio->file_offset;
+	if (!(orig->bi_opf & REQ_BTRFS_ONE_ORDERED))
+		orig_bbio->file_offset += map_length;
+
+	atomic_inc(&orig_bbio->pending_ios);
+	return bio;
+}
+
+static void btrfs_orig_write_end_io(struct bio *bio);
+
+static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
+				       struct btrfs_bio *orig_bbio)
+{
+	/*
+	 * For writes we tolerate nr_mirrors - 1 write failures, so we can't
+	 * just blindly propagate a write failure here.  Instead increment the
+	 * error count in the original I/O context so that it is guaranteed to
+	 * be larger than the error tolerance.
+	 */
+	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
+		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
+		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;
+
+		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
+	} else {
+		orig_bbio->bio.bi_status = bbio->bio.bi_status;
+	}
+}
+
+static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
+{
+	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
+		struct btrfs_bio *orig_bbio = bbio->private;
+
+		if (bbio->bio.bi_status)
+			btrfs_bbio_propagate_error(bbio, orig_bbio);
+		bio_put(&bbio->bio);
+		bbio = orig_bbio;
+	}
+
+	if (atomic_dec_and_test(&bbio->pending_ios))
+		bbio->end_io(bbio);
+}
+
 static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
 {
 	if (cur_mirror == fbio->num_copies)
@@ -92,7 +147,7 @@ static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
 static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
 {
 	if (atomic_dec_and_test(&fbio->repair_count)) {
-		fbio->bbio->end_io(fbio->bbio);
+		btrfs_orig_bbio_end_io(fbio->bbio);
 		mempool_free(fbio, &btrfs_failed_bio_pool);
 	}
 }
@@ -229,7 +284,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de
 	if (fbio)
 		btrfs_repair_done(fbio);
 	else
-		bbio->end_io(bbio);
+		btrfs_orig_bbio_end_io(bbio);
 }
 
 static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
@@ -283,7 +338,7 @@ static void btrfs_simple_end_io(struct bio *bio)
 	} else {
 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
 			btrfs_record_physical_zoned(bbio);
-		bbio->end_io(bbio);
+		btrfs_orig_bbio_end_io(bbio);
 	}
 }
 
@@ -297,7 +352,7 @@ static void btrfs_raid56_end_io(struct bio *bio)
 	if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
 		btrfs_check_read_bio(bbio, NULL);
 	else
-		bbio->end_io(bbio);
+		btrfs_orig_bbio_end_io(bbio);
 
 	btrfs_put_bioc(bioc);
 }
@@ -324,7 +379,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
 	else
 		bio->bi_status = BLK_STS_OK;
 
-	bbio->end_io(bbio);
+	btrfs_orig_bbio_end_io(bbio);
 	btrfs_put_bioc(bioc);
 }
 
@@ -488,7 +543,7 @@ static void run_one_async_done(struct btrfs_work *work)
 
 	/* If an error occurred we just want to clean up the bio and move on. */
 	if (bio->bi_status) {
-		btrfs_bio_end_io(async->bbio, bio->bi_status);
+		btrfs_orig_bbio_end_io(async->bbio);
 		return;
 	}
 
@@ -563,9 +618,11 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
 	return true;
 }
 
-void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num)
+static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
+			       int mirror_num)
 {
 	struct btrfs_bio *bbio = btrfs_bio(bio);
+	struct btrfs_bio *orig_bbio = bbio;
 	u64 logical = bio->bi_iter.bi_sector << 9;
 	u64 length = bio->bi_iter.bi_size;
 	u64 map_length = length;
@@ -582,11 +639,10 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror
 		goto fail;
 	}
 
+	map_length = min(map_length, length);
 	if (map_length < length) {
-		btrfs_crit(fs_info,
-			   "mapping failed logical %llu bio len %llu len %llu",
-			   logical, length, map_length);
-		BUG();
+		bio = btrfs_split_bio(bio, map_length);
+		bbio = btrfs_bio(bio);
 	}
 
 	/*
@@ -597,14 +653,14 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror
 		bbio->saved_iter = bio->bi_iter;
 		ret = btrfs_lookup_bio_sums(bbio);
 		if (ret)
-			goto fail;
+			goto fail_put_bio;
 	}
 
 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
 			ret = btrfs_extract_ordered_extent(btrfs_bio(bio));
 			if (ret)
-				goto fail;
+				goto fail_put_bio;
 		}
 
 		/*
@@ -616,20 +672,32 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror
 		    !btrfs_is_data_reloc_root(bbio->inode->root)) {
 			if (should_async_write(bbio) &&
 			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
-				return;
+				goto done;
 
 			ret = btrfs_bio_csum(bbio);
 			if (ret)
-				goto fail;
+				goto fail_put_bio;
 		}
 	}
 
 	__btrfs_submit_bio(bio, bioc, &smap, mirror_num);
-	return;
+done:
+	return map_length == length;
 
+fail_put_bio:
+	if (map_length < length)
+		bio_put(bio);
 fail:
 	btrfs_bio_counter_dec(fs_info);
-	btrfs_bio_end_io(bbio, ret);
+	btrfs_bio_end_io(orig_bbio, ret);
+	/* Do not submit another chunk */
+	return true;
 }
 
+void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num)
+{
+	while (!btrfs_submit_chunk(fs_info, bio, mirror_num))
+		;
+}
+
 /*
@@ -737,10 +805,13 @@ int __init btrfs_bioset_init(void)
 			offsetof(struct btrfs_bio, bio),
 			BIOSET_NEED_BVECS))
 		return -ENOMEM;
+	if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
+			offsetof(struct btrfs_bio, bio), 0))
+		goto out_free_bioset;
 	if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
 			offsetof(struct btrfs_bio, bio),
 			BIOSET_NEED_BVECS))
-		goto out_free_bioset;
+		goto out_free_clone_bioset;
 	if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
 				      sizeof(struct btrfs_failed_bio)))
 		goto out_free_repair_bioset;
@@ -748,6 +819,8 @@ int __init btrfs_bioset_init(void)
 
 out_free_repair_bioset:
 	bioset_exit(&btrfs_repair_bioset);
+out_free_clone_bioset:
+	bioset_exit(&btrfs_clone_bioset);
 out_free_bioset:
 	bioset_exit(&btrfs_bioset);
 	return -ENOMEM;
@@ -757,5 +830,6 @@ void __cold btrfs_bioset_exit(void)
 {
 	mempool_exit(&btrfs_failed_bio_pool);
 	bioset_exit(&btrfs_repair_bioset);
+	bioset_exit(&btrfs_clone_bioset);
 	bioset_exit(&btrfs_bioset);
 }
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -55,6 +55,7 @@ struct btrfs_bio {
 
 	/* For internal use in read end I/O handling */
 	unsigned int mirror_num;
+	atomic_t pending_ios;
 	struct work_struct end_io_work;
 
 	/*