mm/block: convert rw_page users to bio op use
The rw_page users were not converted to use bio/req ops. As a result,
bdev_write_page is not passing down REQ_OP_WRITE, and the I/Os will
be sent down as reads.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Fixes: 4e1b2d52a8 ("block, fs, drivers: remove REQ_OP compat defs and related code")
Modified by me to:
1) Drop op_flags passing into ->rw_page(), as we don't use it.
2) Make op_is_write() and friends safe to use for !CONFIG_BLOCK
Signed-off-by: Jens Axboe <axboe@fb.com>
			
			
commit abf545484d (parent c1c87c2ba9)
11 changed files with 60 additions and 59 deletions
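
To see the interface change in isolation: ->rw_page() now takes a REQ_OP_* value instead of the old READ/WRITE flag, and drivers test direction with op_is_write(), which this patch also makes usable in !CONFIG_BLOCK builds. Below is a minimal standalone C model of that check, using the names from the patch; the program itself is an illustrative sketch, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* mirrors enum req_op as added by this patch; REQ_OP_READ == 0 */
enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,
	REQ_OP_SECURE_ERASE,
	REQ_OP_WRITE_SAME,
	REQ_OP_FLUSH,
};

/* mirrors the op_is_write() helper the patch moves out of CONFIG_BLOCK */
static bool op_is_write(unsigned int op)
{
	return op == REQ_OP_READ ? false : true;
}

int main(void)
{
	/* bdev_read_page()/bdev_write_page() now pass explicit ops */
	printf("REQ_OP_READ  -> %s\n", op_is_write(REQ_OP_READ) ? "write" : "read");
	printf("REQ_OP_WRITE -> %s\n", op_is_write(REQ_OP_WRITE) ? "write" : "read");
	return 0;
}

Per the commit message above, the failure this fixes was a write submitted through bdev_write_page arriving at the driver without REQ_OP_WRITE and being handled as a read; with an explicit op, the direction can no longer be lost.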
				
			
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-			unsigned int len, unsigned int off, int rw,
+			unsigned int len, unsigned int off, int op,
 			sector_t sector)
 {
 	void *mem;
 	int err = 0;
 
-	if (rw != READ) {
+	if (op_is_write(op)) {
 		err = copy_to_brd_setup(brd, sector, len);
 		if (err)
 			goto out;
 	}
 
 	mem = kmap_atomic(page);
-	if (rw == READ) {
+	if (!op_is_write(op)) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
-	int rw;
 	struct bio_vec bvec;
 	sector_t sector;
 	struct bvec_iter iter;
@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		goto out;
 	}
 
-	rw = bio_data_dir(bio);
-
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
 		int err;
 
 		err = brd_do_bvec(brd, bvec.bv_page, len,
-					bvec.bv_offset, rw, sector);
+					bvec.bv_offset, bio_op(bio), sector);
 		if (err)
 			goto io_error;
 		sector += len >> SECTOR_SHIFT;
@@ -369,11 +366,11 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int rw)
+		       struct page *page, int op)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
-	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
-	page_endio(page, rw & WRITE, err);
+	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
+	page_endio(page, op, err);
 	return err;
 }
 
@@ -843,15 +843,15 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 }
 
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, int rw)
+			int offset, int op)
 {
 	unsigned long start_time = jiffies;
 	int ret;
 
-	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+	generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
 			&zram->disk->part0);
 
-	if (rw == READ) {
+	if (!op_is_write(op)) {
 		atomic64_inc(&zram->stats.num_reads);
 		ret = zram_bvec_read(zram, bvec, index, offset);
 	} else {
@@ -859,10 +859,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 		ret = zram_bvec_write(zram, bvec, index, offset);
 	}
 
-	generic_end_io_acct(rw, &zram->disk->part0, start_time);
+	generic_end_io_acct(op, &zram->disk->part0, start_time);
 
 	if (unlikely(ret)) {
-		if (rw == READ)
+		if (!op_is_write(op))
 			atomic64_inc(&zram->stats.failed_reads);
 		else
 			atomic64_inc(&zram->stats.failed_writes);
@@ -873,7 +873,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 static void __zram_make_request(struct zram *zram, struct bio *bio)
 {
-	int offset, rw;
+	int offset;
 	u32 index;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
@@ -888,7 +888,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
 		return;
 	}
 
-	rw = bio_data_dir(bio);
 	bio_for_each_segment(bvec, bio, iter) {
 		int max_transfer_size = PAGE_SIZE - offset;
 
@@ -903,15 +902,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
 			bv.bv_len = max_transfer_size;
 			bv.bv_offset = bvec.bv_offset;
 
-			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
+			if (zram_bvec_rw(zram, &bv, index, offset,
+					 bio_op(bio)) < 0)
 				goto out;
 
 			bv.bv_len = bvec.bv_len - max_transfer_size;
 			bv.bv_offset += max_transfer_size;
-			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
+			if (zram_bvec_rw(zram, &bv, index + 1, 0,
+					 bio_op(bio)) < 0)
 				goto out;
 		} else
-			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
+			if (zram_bvec_rw(zram, &bvec, index, offset,
+					 bio_op(bio)) < 0)
 				goto out;
 
 		update_position(&index, &offset, &bvec);
@@ -968,7 +970,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int rw)
+		       struct page *page, int op)
 {
 	int offset, err = -EIO;
 	u32 index;
@@ -992,7 +994,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	bv.bv_len = PAGE_SIZE;
 	bv.bv_offset = 0;
 
-	err = zram_bvec_rw(zram, &bv, index, offset, rw);
+	err = zram_bvec_rw(zram, &bv, index, offset, op);
 put_zram:
 	zram_meta_put(zram);
 out:
@@ -1005,7 +1007,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	 * (e.g., SetPageError, set_page_dirty and extra works).
 	 */
 	if (err == 0)
-		page_endio(page, rw, 0);
+		page_endio(page, op, 0);
 	return err;
 }
 
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
 			struct page *page, unsigned int len, unsigned int off,
-			int rw, sector_t sector)
+			int op, sector_t sector)
 {
 	int ret;
 
-	if (rw == READ) {
+	if (!op_is_write(op)) {
 		ret = btt_read_pg(btt, bip, page, off, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 	struct bvec_iter iter;
 	unsigned long start;
 	struct bio_vec bvec;
-	int err = 0, rw;
+	int err = 0;
 	bool do_acct;
 
 	/*
@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	do_acct = nd_iostat_start(bio, &start);
-	rw = bio_data_dir(bio);
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
 
@@ -1181,11 +1180,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 		BUG_ON(len % btt->sector_size);
 
 		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
-				rw, iter.bi_sector);
+				  bio_op(bio), iter.bi_sector);
 		if (err) {
 			dev_info(&btt->nd_btt->dev,
 					"io error in %s sector %lld, len %d,\n",
-					(rw == READ) ? "READ" : "WRITE",
+					(op_is_write(bio_op(bio))) ? "WRITE" :
+					"READ",
 					(unsigned long long) iter.bi_sector, len);
 			bio->bi_error = err;
 			break;
@@ -1200,12 +1200,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
-		struct page *page, int rw)
+		struct page *page, int op)
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
-	page_endio(page, rw & WRITE, 0);
+	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
+	page_endio(page, op, 0);
 	return 0;
 }
 
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-			unsigned int len, unsigned int off, int rw,
+			unsigned int len, unsigned int off, int op,
 			sector_t sector)
 {
 	int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
 		bad_pmem = true;
 
-	if (rw == READ) {
+	if (!op_is_write(op)) {
 		if (unlikely(bad_pmem))
 			rc = -EIO;
 		else {
@@ -134,7 +134,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
 		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-				bvec.bv_offset, bio_data_dir(bio),
+				bvec.bv_offset, bio_op(bio),
 				iter.bi_sector);
 		if (rc) {
 			bio->bi_error = rc;
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int rw)
+		       struct page *page, int op)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	int rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
 
 	/*
 	 * The ->rw_page interface is subtle and tricky.  The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	 * caused by double completion.
 	 */
 	if (rc == 0)
-		page_endio(page, rw & WRITE, 0);
+		page_endio(page, op, 0);
 
 	return rc;
 }
 
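Note the pattern shared by zram_rw_page() and pmem_rw_page(): page_endio() runs only when the I/O succeeded. As the comment above puts it, the ->rw_page interface is subtle; on error the caller handles the failure, so completing the page in the driver as well would be a double completion. A small userspace model of that contract (struct page, page_endio() and the rc values here are illustrative stand-ins, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

struct page { bool completed; };		/* stand-in, not the kernel's */

static void page_endio(struct page *page, int op, int err)
{
	page->completed = true;			/* model: mark the I/O complete */
	printf("page_endio(op=%d, err=%d)\n", op, err);
}

/* models pmem_rw_page()/zram_rw_page(): complete only on success */
static int model_rw_page(struct page *page, int op, int rc)
{
	if (rc == 0)
		page_endio(page, op, 0);
	return rc;				/* on error, the caller cleans up */
}

int main(void)
{
	struct page p = { false };

	model_rw_page(&p, 1 /* REQ_OP_WRITE */, -5 /* -EIO */);
	printf("after failed write, completed=%d (caller handles it)\n",
	       p.completed);
	return 0;
}
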
@@ -416,7 +416,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
 	result = blk_queue_enter(bdev->bd_queue, false);
 	if (result)
 		return result;
-	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+			      REQ_OP_READ);
 	blk_queue_exit(bdev->bd_queue);
 	return result;
 }
@@ -445,7 +446,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 			struct page *page, struct writeback_control *wbc)
 {
 	int result;
-	int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
 	const struct block_device_operations *ops = bdev->bd_disk->fops;
 
 	if (!ops->rw_page || bdev_get_integrity(bdev))
@@ -455,7 +455,8 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 		return result;
 
 	set_page_writeback(page);
-	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+			      REQ_OP_WRITE);
 	if (result)
 		end_page_writeback(page);
 	else
 
@@ -50,7 +50,7 @@ static void mpage_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		page_endio(page, bio_data_dir(bio), bio->bi_error);
+		page_endio(page, bio_op(bio), bio->bi_error);
 	}
 
 	bio_put(bio);
 
@@ -18,6 +18,17 @@ struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 
+enum req_op {
+	REQ_OP_READ,
+	REQ_OP_WRITE,
+	REQ_OP_DISCARD,		/* request to discard sectors */
+	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
+	REQ_OP_WRITE_SAME,	/* write same block many times */
+	REQ_OP_FLUSH,		/* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
 #ifdef CONFIG_BLOCK
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
@@ -228,17 +239,6 @@ enum rq_flag_bits {
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 
-enum req_op {
-	REQ_OP_READ,
-	REQ_OP_WRITE,
-	REQ_OP_DISCARD,		/* request to discard sectors */
-	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
-	REQ_OP_WRITE_SAME,	/* write same block many times */
-	REQ_OP_FLUSH,		/* request for cache flush */
-};
-
-#define REQ_OP_BITS 3
-
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE	-1U
 #define BLK_QC_T_SHIFT	16
 
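The enum and REQ_OP_BITS move above the #ifdef CONFIG_BLOCK guard so that op_is_write(), relocated in the fs.h hunk below, can be compiled even when CONFIG_BLOCK is off. As a side check, 3 bits are exactly enough for the six ops, whose values run 0..5; a compile-time sketch of that invariant (C11, standalone, not part of the patch):

#include <assert.h>

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,
	REQ_OP_SECURE_ERASE,
	REQ_OP_WRITE_SAME,
	REQ_OP_FLUSH,
};

#define REQ_OP_BITS 3

/* highest op is REQ_OP_FLUSH == 5, which fits in 3 bits (max 7) */
static_assert(REQ_OP_FLUSH < (1 << REQ_OP_BITS), "ops must fit in REQ_OP_BITS");

int main(void) { return 0; }
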
@@ -1672,7 +1672,7 @@ struct blk_dax_ctl {
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
-	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
+	int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
 
@@ -2480,12 +2480,13 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
 extern void make_bad_inode(struct inode *);
 extern bool is_bad_inode(struct inode *);
 
-#ifdef CONFIG_BLOCK
 static inline bool op_is_write(unsigned int op)
 {
 	return op == REQ_OP_READ ? false : true;
 }
 
+#ifdef CONFIG_BLOCK
+
 /*
  * return data direction, READ or WRITE
  */
 
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
 
-void page_endio(struct page *page, int rw, int err);
+void page_endio(struct page *page, int op, int err);
 
 /*
  * Add an arbitrary waiter to a page's wait queue
 
@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
  * After completing I/O on a page, call this routine to update the page
  * flags appropriately
  */
-void page_endio(struct page *page, int rw, int err)
+void page_endio(struct page *page, int op, int err)
 {
-	if (rw == READ) {
+	if (!op_is_write(op)) {
 		if (!err) {
 			SetPageUptodate(page);
 		} else {
@@ -897,7 +897,7 @@ void page_endio(struct page *page, int rw, int err)
 			SetPageError(page);
 		}
 		unlock_page(page);
-	} else { /* rw == WRITE */
+	} else {
 		if (err) {
 			SetPageError(page);
 			if (page->mapping)