forked from mirrors/linux
		
	dm mpath: avoid that path removal can trigger an infinite loop
If blk_get_request() fails, check whether the failure is due to a path being removed. If that is the case, fail the path by triggering a call to fail_path(). This prevents the following scenario from occurring while removing paths: * CPU usage of a kworker thread jumps to 100%. * Removing the DM device becomes impossible. Delay requeueing if blk_get_request() returns -EBUSY or -EWOULDBLOCK, and the queue is not dying, because in these cases immediate requeueing is inappropriate. Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com> Cc: Hannes Reinecke <hare@suse.com> Cc: Christoph Hellwig <hch@lst.de> Cc: <stable@vger.kernel.org> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
		
							parent
							
								
									89bfce763e
								
							
						
					
					
						commit
						7083abbbfc
					
				
					 1 changed file with 11 additions and 4 deletions
				
			
		|  | @ -489,6 +489,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | ||||||
| 	struct pgpath *pgpath; | 	struct pgpath *pgpath; | ||||||
| 	struct block_device *bdev; | 	struct block_device *bdev; | ||||||
| 	struct dm_mpath_io *mpio = get_mpio(map_context); | 	struct dm_mpath_io *mpio = get_mpio(map_context); | ||||||
|  | 	struct request_queue *q; | ||||||
| 	struct request *clone; | 	struct request *clone; | ||||||
| 
 | 
 | ||||||
| 	/* Do we need to select a new pgpath? */ | 	/* Do we need to select a new pgpath? */ | ||||||
|  | @ -511,12 +512,18 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | ||||||
| 	mpio->nr_bytes = nr_bytes; | 	mpio->nr_bytes = nr_bytes; | ||||||
| 
 | 
 | ||||||
| 	bdev = pgpath->path.dev->bdev; | 	bdev = pgpath->path.dev->bdev; | ||||||
| 
 | 	q = bdev_get_queue(bdev); | ||||||
| 	clone = blk_get_request(bdev_get_queue(bdev), | 	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC); | ||||||
| 			rq->cmd_flags | REQ_NOMERGE, |  | ||||||
| 			GFP_ATOMIC); |  | ||||||
| 	if (IS_ERR(clone)) { | 	if (IS_ERR(clone)) { | ||||||
| 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */ | 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */ | ||||||
|  | 		bool queue_dying = blk_queue_dying(q); | ||||||
|  | 		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing", | ||||||
|  | 			    PTR_ERR(clone), queue_dying ? " (path offline)" : ""); | ||||||
|  | 		if (queue_dying) { | ||||||
|  | 			atomic_inc(&m->pg_init_in_progress); | ||||||
|  | 			activate_or_offline_path(pgpath); | ||||||
|  | 			return DM_MAPIO_REQUEUE; | ||||||
|  | 		} | ||||||
| 		return DM_MAPIO_DELAY_REQUEUE; | 		return DM_MAPIO_DELAY_REQUEUE; | ||||||
| 	} | 	} | ||||||
| 	clone->bio = clone->biotail = NULL; | 	clone->bio = clone->biotail = NULL; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Bart Van Assche
						Bart Van Assche