forked from mirrors/linux
		
	blk-map: call blk_queue_bounce from blk_rq_append_bio
This moves the knowledge about bouncing out of the callers and into the block core (just like we do for the normal I/O path), and allows us to unexport blk_queue_bounce. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									e442cbf910
								
							
						
					
					
						commit
						caa4b02476
					
				
					 3 changed files with 4 additions and 10 deletions
				
			
		| 
						 | 
					@ -16,6 +16,8 @@
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
int blk_rq_append_bio(struct request *rq, struct bio *bio)
 | 
					int blk_rq_append_bio(struct request *rq, struct bio *bio)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
 | 
						blk_queue_bounce(rq->q, &bio);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (!rq->bio) {
 | 
						if (!rq->bio) {
 | 
				
			||||||
		blk_rq_bio_prep(rq->q, rq, bio);
 | 
							blk_rq_bio_prep(rq->q, rq, bio);
 | 
				
			||||||
	} else {
 | 
						} else {
 | 
				
			||||||
| 
						 | 
					@ -72,15 +74,13 @@ static int __blk_rq_map_user_iov(struct request *rq,
 | 
				
			||||||
		map_data->offset += bio->bi_iter.bi_size;
 | 
							map_data->offset += bio->bi_iter.bi_size;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	orig_bio = bio;
 | 
						orig_bio = bio;
 | 
				
			||||||
	blk_queue_bounce(q, &bio);
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
	 * We link the bounce buffer in and could have to traverse it
 | 
						 * We link the bounce buffer in and could have to traverse it
 | 
				
			||||||
	 * later so we have to get a ref to prevent it from being freed
 | 
						 * later so we have to get a ref to prevent it from being freed
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	bio_get(bio);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	ret = blk_rq_append_bio(rq, bio);
 | 
						ret = blk_rq_append_bio(rq, bio);
 | 
				
			||||||
 | 
						bio_get(bio);
 | 
				
			||||||
	if (ret) {
 | 
						if (ret) {
 | 
				
			||||||
		bio_endio(bio);
 | 
							bio_endio(bio);
 | 
				
			||||||
		__blk_rq_unmap_user(orig_bio);
 | 
							__blk_rq_unmap_user(orig_bio);
 | 
				
			||||||
| 
						 | 
					@ -249,7 +249,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 | 
				
			||||||
		return ret;
 | 
							return ret;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	blk_queue_bounce(q, &rq->bio);
 | 
					 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
EXPORT_SYMBOL(blk_rq_map_kern);
 | 
					EXPORT_SYMBOL(blk_rq_map_kern);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -284,5 +284,3 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	__blk_queue_bounce(q, bio_orig, pool);
 | 
						__blk_queue_bounce(q, bio_orig, pool);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					 | 
				
			||||||
EXPORT_SYMBOL(blk_queue_bounce);
 | 
					 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1576,10 +1576,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
 | 
				
			||||||
		return req;
 | 
							return req;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	for_each_bio(bio) {
 | 
						for_each_bio(bio) {
 | 
				
			||||||
		struct bio *bounce_bio = bio;
 | 
							ret = blk_rq_append_bio(req, bio);
 | 
				
			||||||
 | 
					 | 
				
			||||||
		blk_queue_bounce(req->q, &bounce_bio);
 | 
					 | 
				
			||||||
		ret = blk_rq_append_bio(req, bounce_bio);
 | 
					 | 
				
			||||||
		if (ret)
 | 
							if (ret)
 | 
				
			||||||
			return ERR_PTR(ret);
 | 
								return ERR_PTR(ret);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue