forked from mirrors/linux

	md/raid6: refactor raid5_read_one_chunk
Refactor raid5_read_one_chunk so that all simple checks are done
before allocating the bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Song Liu <song@kernel.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
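The shape of the refactor, running every cheap validity check before the
allocation so that no early-exit path has to free anything, is easy to see
outside the kernel. A minimal userspace C sketch of the same pattern follows;
the names are hypothetical stand-ins, not the kernel code, which operates on
struct bio and uses bio_put() where this sketch would use free():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for an I/O request. */
struct request {
	size_t len;
	int aligned;
};

/*
 * All simple checks run before the allocation, mirroring the refactored
 * raid5_read_one_chunk(): every early return happens while nothing has
 * been allocated, so no failure path needs to clean anything up.
 */
static char *clone_for_aligned_read(const struct request *rq)
{
	if (!rq->aligned)	/* cheap check, nothing to undo */
		return NULL;
	if (rq->len == 0)	/* another cheap check */
		return NULL;

	/* Allocate only once we know the clone will actually be used. */
	char *clone = malloc(rq->len);
	if (!clone)
		return NULL;
	memset(clone, 0, rq->len);
	return clone;
}

int main(void)
{
	struct request ok = { .len = 64, .aligned = 1 };
	struct request bad = { .len = 64, .aligned = 0 };

	char *a = clone_for_aligned_read(&ok);
	char *b = clone_for_aligned_read(&bad);
	printf("aligned request: %s\n", a ? "cloned" : "rejected");
	printf("unaligned request: %s\n", b ? "cloned" : "rejected");
	free(a);
	free(b);	/* free(NULL) is a no-op, so rejected requests are safe */
	return 0;
}

In the patch below this ordering also lets bio_clone_fast() succeed
unconditionally from the caller's point of view, since every reason to bail
out has already been checked.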
This commit is contained in:

parent 6a59656968
commit e82ed3a4fb

1 changed file with 45 additions and 63 deletions
			
drivers/md/raid5.c
@@ -5393,91 +5393,73 @@ static void raid5_align_endio(struct bio *bi)
 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct r5conf *conf = mddev->private;
-	int dd_idx;
-	struct bio* align_bi;
+	struct bio *align_bio;
 	struct md_rdev *rdev;
-	sector_t end_sector;
+	sector_t sector, end_sector, first_bad;
+	int bad_sectors, dd_idx;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
 		pr_debug("%s: non aligned\n", __func__);
 		return 0;
 	}
-	/*
-	 * use bio_clone_fast to make a copy of the bio
-	 */
-	align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
-	if (!align_bi)
-		return 0;
-	/*
-	 *   set bi_end_io to a new function, and set bi_private to the
-	 *     original bio.
-	 */
-	align_bi->bi_end_io  = raid5_align_endio;
-	align_bi->bi_private = raid_bio;
-	/*
-	 *	compute position
-	 */
-	align_bi->bi_iter.bi_sector =
-		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
-				     0, &dd_idx, NULL);
 
-	end_sector = bio_end_sector(align_bi);
+	sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+				      &dd_idx, NULL);
+	end_sector = bio_end_sector(raid_bio);
+
 	rcu_read_lock();
+	if (r5c_big_stripe_cached(conf, sector))
+		goto out_rcu_unlock;
+
 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
 	    rdev->recovery_offset < end_sector) {
 		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
-		if (rdev &&
-		    (test_bit(Faulty, &rdev->flags) ||
+		if (!rdev)
+			goto out_rcu_unlock;
+		if (test_bit(Faulty, &rdev->flags) ||
 		    !(test_bit(In_sync, &rdev->flags) ||
-		      rdev->recovery_offset >= end_sector)))
-			rdev = NULL;
+		      rdev->recovery_offset >= end_sector))
+			goto out_rcu_unlock;
 	}
 
-	if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
-		rcu_read_unlock();
-		bio_put(align_bi);
-		return 0;
-	}
-
-	if (rdev) {
-		sector_t first_bad;
-		int bad_sectors;
+	atomic_inc(&rdev->nr_pending);
+	rcu_read_unlock();
 
-		atomic_inc(&rdev->nr_pending);
-		rcu_read_unlock();
-		raid_bio->bi_next = (void*)rdev;
-		bio_set_dev(align_bi, rdev->bdev);
+	align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
+	bio_set_dev(align_bio, rdev->bdev);
+	align_bio->bi_end_io = raid5_align_endio;
+	align_bio->bi_private = raid_bio;
+	align_bio->bi_iter.bi_sector = sector;
 
-		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
-				bio_sectors(align_bi),
-				&first_bad, &bad_sectors)) {
-			bio_put(align_bi);
-			rdev_dec_pending(rdev, mddev);
-			return 0;
-		}
+	raid_bio->bi_next = (void *)rdev;
+
+	if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+			&bad_sectors)) {
+		bio_put(align_bio);
+		rdev_dec_pending(rdev, mddev);
+		return 0;
+	}
 
-		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_iter.bi_sector += rdev->data_offset;
+	/* No reshape active, so we can trust rdev->data_offset */
+	align_bio->bi_iter.bi_sector += rdev->data_offset;
 
-		spin_lock_irq(&conf->device_lock);
-		wait_event_lock_irq(conf->wait_for_quiescent,
-				    conf->quiesce == 0,
-				    conf->device_lock);
-		atomic_inc(&conf->active_aligned_reads);
-		spin_unlock_irq(&conf->device_lock);
+	spin_lock_irq(&conf->device_lock);
+	wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+			    conf->device_lock);
+	atomic_inc(&conf->active_aligned_reads);
+	spin_unlock_irq(&conf->device_lock);
 
-		if (mddev->gendisk)
-			trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_iter.bi_sector);
-		submit_bio_noacct(align_bi);
-		return 1;
-	} else {
-		rcu_read_unlock();
-		bio_put(align_bi);
-		return 0;
-	}
+	if (mddev->gendisk)
+		trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
+				      raid_bio->bi_iter.bi_sector);
+	submit_bio_noacct(align_bio);
+	return 1;
+
+out_rcu_unlock:
+	rcu_read_unlock();
+	return 0;
 }
 
 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
 {