mirror of https://github.com/torvalds/linux.git
	block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 73c1010119
commit 7eaceaccab

119 changed files with 151 additions and 1269 deletions
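For orientation before the diff (this note is not part of the commit): after this series, batching moves from the request queue to the submitting task. A minimal sketch of the replacement API, assuming the post-series interfaces blk_start_plug(), blk_finish_plug(), and blk_flush_plug(); the submit_batch() helper itself is hypothetical:

#include <linux/blkdev.h>

/*
 * Hypothetical helper: batch a set of read bios under an on-stack plug.
 * Requests queued between blk_start_plug() and blk_finish_plug() sit on
 * the task's plug list and are dispatched together when the plug is
 * finished (or when the task sleeps, via blk_flush_plug()).
 */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* begin on-stack batching */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* queued under the plug */
	blk_finish_plug(&plug);			/* flush the batch to the queue */
}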
@@ -963,11 +963,6 @@ elevator_dispatch_fn*		fills the dispatch queue with ready requests.
 
 elevator_add_req_fn*		called to add a new request into the scheduler
 
-elevator_queue_empty_fn		returns true if the merge queue is empty.
-				Drivers shouldn't use this, but rather check
-				if elv_next_request is NULL (without losing the
-				request if one exists!)
-
 elevator_former_req_fn
 elevator_latter_req_fn		These return the request before or after the
 				one specified in disk sort order. Used by the
block/blk-core.c (173 lines changed)
@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+ */
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+	/*
+	 * If the current process is plugged and has barriers submitted,
+	 * we will livelock if we don't unplug first.
+	 */
+	blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
 	struct request_queue *q;
@@ -224,137 +224,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	/*
-	 * don't plug a stopped queue, it must be paired with blk_start_queue()
-	 * which will restart the queueing
-	 */
-	if (blk_queue_stopped(q))
-		return;
-
-	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		trace_block_plug(q);
-	}
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q:    The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_plug_device(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-		return 0;
-
-	del_timer(&q->unplug_timer);
-	return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-		return;
-
-	q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q:    The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-	if (blk_queue_plugged(q)) {
-		spin_lock_irq(q->queue_lock);
-		__generic_unplug_device(q);
-		spin_unlock_irq(q->queue_lock);
-	}
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-				   struct page *page)
-{
-	struct request_queue *q = bdi->unplug_io_data;
-
-	blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-	struct request_queue *q =
-		container_of(work, struct request_queue, unplug_work);
-
-	trace_block_unplug_io(q);
-	q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-	struct request_queue *q = (struct request_queue *)data;
-
-	trace_block_unplug_timer(q);
-	kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		trace_block_unplug_io(q);
-		q->unplug_fn(q);
-	}
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
 	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
-	cancel_work_sync(&q->unplug_work);
 	throtl_shutdown_timer_wq(q);
 	cancel_delayed_work_sync(&q->delay_work);
+	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -430,14 +310,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
-
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	if (elv_queue_empty(q))
-		return;
-
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
@@ -445,10 +320,8 @@ void __blk_run_queue(struct request_queue *q)
 	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
+	} else
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-	q->backing_dev_info.unplug_io_data = q;
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
-	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
 	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
-	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;
 
@@ -910,8 +778,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
 		trace_block_sleeprq(q, bio, rw_flags & 1);
 
-		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
 		io_schedule();
 
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 			     int where)
 {
 	drive_stat_acct(rq, 1);
-	__elv_add_request(q, rq, where, 0);
+	__elv_add_request(q, rq, where);
 }
 
 /**
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
 	}
 
 	if (q) {
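The flush_plug_list() hunk above is the other half of the design: requests batched on a task's plug are inserted into their queues in one pass. A hedged sketch of what that drain conceptually does, assuming struct blk_plug carries a simple list of pending requests; the drain_plug() name is illustrative, and the real function also sorts the list and batches lock acquisition per request_queue:

/*
 * Illustrative sketch only, not the tree's flush_plug_list().
 */
static void drain_plug(struct blk_plug *plug)
{
	struct request *rq;

	while (!list_empty(&plug->list)) {
		rq = list_entry(plug->list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);

		spin_lock_irq(rq->q->queue_lock);
		/* rq is already accounted, so use raw insert (see hunk above) */
		__elv_add_request(rq->q, rq, ELEVATOR_INSERT_SORT);
		spin_unlock_irq(rq->q->queue_lock);
	}
}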
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
-	__elv_add_request(q, rq, where, 1);
-	__generic_unplug_device(q);
+	__elv_add_request(q, rq, where);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running = &q->flush_queue[q->flush_running_idx];
-	bool was_empty = elv_queue_empty(q);
 	bool queued = false;
 	struct request *rq, *n;
 
@@ -213,7 +212,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	}
 
 	/* after populating an empty queue, kick it to avoid stall */
-	if (queued && was_empty)
+	if (queued)
 		__blk_run_queue(q);
 }
 
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
 
-	q->unplug_thresh = 4;		/* hmm */
-	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
-	if (q->unplug_delay == 0)
-		q->unplug_delay = 1;
-
-	q->unplug_timer.function = blk_unplug_timeout;
-	q->unplug_timer.data = (unsigned long)q;
-
 	blk_set_default_limits(&q->limits);
 	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
@@ -800,7 +800,6 @@ static int throtl_dispatch(struct request_queue *q)
 	if (nr_disp) {
 		while((bio = bio_list_pop(&bio_list_on_stack)))
 			generic_make_request(bio);
-		blk_unplug(q);
 	}
 	return nr_disp;
 }
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
@@ -499,13 +499,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 	}
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4061,7 +4054,6 @@ static struct elevator_type iosched_cfq = {
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
 		.elevator_deactivate_req_fn =	cfq_deactivate_request,
-		.elevator_queue_empty_fn =	cfq_queue_empty,
 		.elevator_completed_req_fn =	cfq_completed_request,
 		.elevator_former_req_fn =	elv_rb_former_request,
 		.elevator_latter_req_fn =	elv_rb_latter_request,
@@ -326,14 +326,6 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
 	return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	return list_empty(&dd->fifo_list[WRITE])
-		&& list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
 	struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
 		.elevator_merge_req_fn =	deadline_merged_requests,
 		.elevator_dispatch_fn =		deadline_dispatch_requests,
 		.elevator_add_req_fn =		deadline_add_request,
-		.elevator_queue_empty_fn =	deadline_queue_empty,
 		.elevator_former_req_fn =	elv_rb_former_request,
 		.elevator_latter_req_fn =	elv_rb_latter_request,
 		.elevator_init_fn =		deadline_init_queue,
@@ -619,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-	int unplug_it = 1;
-
 	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 * Clear unplug_it and fall through.
-		 */
-		unplug_it = 0;
-
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
@@ -679,24 +670,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
 		BUG();
 	}
-
-	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-				- queue_in_flight(q);
-
-		if (nrq >= q->unplug_thresh)
-			__generic_unplug_device(q);
-	}
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-		       int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
 
@@ -711,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		    where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
-	if (plug)
-		blk_plug_device(q);
-
 	elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-		     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, where, plug);
+	__elv_add_request(q, rq, where);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (!list_empty(&q->queue_head))
-		return 0;
-
-	if (e->ops->elevator_queue_empty_fn)
-		return e->ops->elevator_queue_empty_fn(q);
-
-	return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
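With the plug flag gone from the elevator API, call sites migrate mechanically; a before/after sketch using ELEVATOR_INSERT_FRONT, mirroring the same change the ide-park.c hunk further below makes:

/* Before this commit: a trailing flag asked the elevator to plug. */
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);

/* After: the argument is gone; plugging is the submitter's concern. */
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);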
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
 	list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(struct request_queue *q)
-{
-	struct noop_data *nd = q->elevator->elevator_data;
-
-	return list_empty(&nd->queue);
-}
-
 static struct request *
 noop_former_request(struct request_queue *q, struct request *rq)
 {
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
 		.elevator_merge_req_fn		= noop_merged_requests,
 		.elevator_dispatch_fn		= noop_dispatch,
 		.elevator_add_req_fn		= noop_add_request,
-		.elevator_queue_empty_fn	= noop_queue_empty,
 		.elevator_former_req_fn		= noop_former_request,
 		.elevator_latter_req_fn		= noop_latter_request,
 		.elevator_init_fn		= noop_init_queue,
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
 	int sg_index = 0;
 	int chained = 0;
 
-	/* We call start_io here in case there is a command waiting on the
-	 * queue that has not been sent.
-	 */
-	if (blk_queue_plugged(q))
-		goto startio;
-
       queue:
 	creq = blk_peek_request(q);
 	if (!creq)
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
 	struct scatterlist tmp_sg[SG_MAX];
 	int i, dir, seg;
 
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue_next:
 	creq = blk_peek_request(q);
 	if (!creq)
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
 		}
 	}
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
 	/* always (try to) flush bitmap to stable storage */
 	drbd_md_flush(mdev);
 
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
 	for (i = 0; i < num_pages; i++)
 		bm_page_io_async(mdev, b, i, rw);
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
 	wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
 
 	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
 	return QUEUE_ORDERED_NONE;
 }
 
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
-	if (q && q->unplug_fn)
-		q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
-	if (get_ldev(mdev)) {
-		drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
-		put_ldev(mdev);
-	}
-}
-
 static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
@@ -2719,35 +2719,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
 	return 0;
 }
 
-static void drbd_unplug_fn(struct request_queue *q)
-{
-	struct drbd_conf *mdev = q->queuedata;
-
-	/* unplug FIRST */
-	spin_lock_irq(q->queue_lock);
-	blk_remove_plug(q);
-	spin_unlock_irq(q->queue_lock);
-
-	/* only if connected */
-	spin_lock_irq(&mdev->req_lock);
-	if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
-		D_ASSERT(mdev->state.role == R_PRIMARY);
-		if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
-			/* add to the data.work queue,
-			 * unless already queued.
-			 * XXX this might be a good addition to drbd_queue_work
-			 * anyways, to detect "double queuing" ... */
-			if (list_empty(&mdev->unplug_work.list))
-				drbd_queue_work(&mdev->data.work,
-						&mdev->unplug_work);
-		}
-	}
-	spin_unlock_irq(&mdev->req_lock);
-
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-}
-
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
 	/* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3193,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &mdev->req_lock; /* needed since we use */
-		/* plugging on a queue, that actually has no requests! */
-	q->unplug_fn = drbd_unplug_fn;
+	q->queue_lock = &mdev->req_lock;
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
 	if (!mdev->md_io_page)
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 	return NULL;
 }
 
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests.  don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
-	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
-		drbd_kick_lo(mdev);
-}
-
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
 	struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	LIST_HEAD(reclaimed);
 	struct drbd_epoch_entry *e, *t;
 
-	maybe_kick_lo(mdev);
 	spin_lock_irq(&mdev->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
-		drbd_kick_lo(mdev);
-		schedule();
+		io_schedule();
 		finish_wait(&mdev->ee_wait, &wait);
 		spin_lock_irq(&mdev->req_lock);
 	}
@@ -1147,7 +1136,6 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
 
 		drbd_generic_make_request(mdev, fault_type, bio);
 	} while (bios);
-	maybe_kick_lo(mdev);
 	return 0;
 
 fail:
@@ -1167,9 +1155,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 
 	inc_unacked(mdev);
 
-	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
-		drbd_kick_lo(mdev);
-
 	mdev->current_epoch->barrier_nr = p->barrier;
 	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
 
@@ -3556,9 +3541,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-
 	/* Make sure we've acked all the TCP data associated
 	 * with the data requests being unplugged */
 	drbd_tcp_quickack(mdev->data.socket);
@@ -960,10 +960,6 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 			bio_endio(req->private_bio, -EIO);
 	}
 
-	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
-	 * we plug after submit, so we won't miss an unplug event */
-	drbd_plug_device(mdev);
-
 	return 0;
 
 fail_conflicting:
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 		 * queue (or even the read operations for those packets
 		 * is not finished by now).   Retry in 100ms. */
 
-		drbd_kick_lo(mdev);
 		__set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(HZ / 10);
 		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
 		generic_make_request(bio);
 }
 
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
-	struct request_queue *q;
-	q = bdev_get_queue(mdev->this_bdev);
-
-	spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
-	if (!blk_queue_plugged(q)) {
-		blk_plug_device(q);
-		del_timer(&q->unplug_timer);
-		/* unplugging should not happen automatically... */
-	}
-	spin_unlock_irq(q->queue_lock);
-}
-
 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
 {
         return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
@@ -3837,7 +3837,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_end_io = floppy_rb0_complete;
 
 	submit_bio(READ, &bio);
-	generic_unplug_device(bdev_get_queue(bdev));
 	process_fd_request();
 	wait_for_completion(&complete);
 
@@ -541,17 +541,6 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 	return 0;
 }
 
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
-	struct loop_device *lo = q->queuedata;
-
-	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
-	blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
 struct switch_request {
 	struct file *file;
 	struct completion wait;
@@ -918,7 +907,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	 */
 	blk_queue_make_request(lo->lo_queue, loop_make_request);
 	lo->lo_queue->queuedata = lo;
-	lo->lo_queue->unplug_fn = loop_unplug;
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1020,7 +1008,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
 	kthread_stop(lo->lo_thread);
 
-	lo->lo_queue->unplug_fn = NULL;
 	lo->lo_backing_file = NULL;
 
 	loop_release_xfer(lo);
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
 				min_sleep_time = pkt->sleep_time;
 			}
 
-			generic_unplug_device(bdev_get_queue(pd->bdev));
-
 			VPRINTK("kcdrwd: sleeping\n");
 			residue = schedule_timeout(min_sleep_time);
 			VPRINTK("kcdrwd: wake up\n");
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  *
  * Whenever IO on the active page completes, the Ready page is activated
  * and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
  *
  * If a request arrives while both pages a full, it is queued, and b_rdev is
  * overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = &page->bio;
 }
 
-static void mm_unplug_device(struct request_queue *q)
-{
-	struct cardinfo *card = q->queuedata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (blk_remove_plug(q))
-		activate(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
 /*
  * If there is room on Ready page, take
  * one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	blk_plug_device(q);
 	spin_unlock_irq(&card->lock);
 
 	return 0;
					@ -907,7 +894,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 | 
				
			||||||
	blk_queue_make_request(card->queue, mm_make_request);
 | 
						blk_queue_make_request(card->queue, mm_make_request);
 | 
				
			||||||
	card->queue->queue_lock = &card->lock;
 | 
						card->queue->queue_lock = &card->lock;
 | 
				
			||||||
	card->queue->queuedata = card;
 | 
						card->queue->queuedata = card;
 | 
				
			||||||
	card->queue->unplug_fn = mm_unplug_device;
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 | 
						tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
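The umem hunks show the driver-side half of the conversion: the unplug_fn hook, the blk_plug_device() call on submission, and the unplug handler itself all go away, since this commit also drops unplug_fn from struct request_queue. A minimal sketch of what queue setup looks like afterwards; names are taken from the hunks above and the body is illustrative, not the committed code:

#include <linux/blkdev.h>

/* Sketch: bio-based queue setup once per-queue plugging is gone.
 * There is no unplug hook left to install; batching is now done by
 * the *submitter* holding an on-stack struct blk_plug. */
static void example_setup_queue(struct cardinfo *card)
{
	blk_queue_make_request(card->queue, mm_make_request);
	card->queue->queue_lock = &card->lock;
	card->queue->queuedata = card;
	/* previously: card->queue->unplug_fn = mm_unplug_device; */
}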
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 
 	drive->hwif->rq = NULL;
 
-	elv_add_request(drive->queue, &drive->sense_rq,
-			ELEVATOR_INSERT_FRONT, 0);
+	elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
@@ -549,8 +549,6 @@ void do_ide_request(struct request_queue *q)
 
 	if (rq)
 		blk_requeue_request(q, rq);
-	if (!elv_queue_empty(q))
-		blk_plug_device(q);
 }
 
 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 
 	if (rq)
 		blk_requeue_request(q, rq);
-	if (!elv_queue_empty(q))
-		blk_plug_device(q);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	rq->cmd[0] = REQ_UNPARK_HEADS;
 	rq->cmd_len = 1;
 	rq->cmd_type = REQ_TYPE_SPECIAL;
-	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
 
 out:
 	return;
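The IDE hunks are API fallout rather than behavioural change: elv_add_request() loses its trailing plug argument, because whether the queue gets plugged is no longer something a caller can decide. A sketch of the new call shape, under the assumption of the post-commit signature seen in the hunks above:

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Sketch: insert a prepared request at the head of the queue.
 * Before this commit the call carried a fourth 'plug' flag,
 * elv_add_request(q, rq, where, plug); now it does not. */
static void example_insert_front(struct request_queue *q, struct request *rq)
{
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
}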
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			md_unplug(bitmap->mddev);
-			schedule();
+			io_schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
 		}
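The bitmap change is the waiting-side idiom: instead of unplugging one specific queue and then calling schedule(), the waiter calls io_schedule(), which accounts the sleep as I/O wait and, in the on-stack plugging scheme introduced by the parent commit, flushes the sleeping task's own plug before it blocks. A sketch of the pattern, where space_available() is a hypothetical condition standing in for bitmap's overflow check:

#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch: sleep until space_available() (hypothetical) becomes true.
 * io_schedule() replaces the old "unplug the device, then schedule()"
 * pair removed in the hunk above. */
static void example_wait_for_space(wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!space_available())
		io_schedule();	/* D-state sleep; plugged I/O is flushed */
	finish_wait(wq, &wait);
}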
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_unplug(struct crypt_config *cc)
-{
-	blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
 	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
-	if (!clone) {
-		kcryptd_unplug(cc);
+	if (!clone)
 		return 1;
-	}
 
 	crypt_inc_pending(io);
 
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
-	/*
-	 * Block devices to unplug.
-	 * Non-NULL pointer means that a block device has some pending requests
-	 * and needs to be unplugged.
-	 */
-	struct block_device *unplug[2];
-
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
 	return 0;
 }
 
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
-	if (kc->unplug[rw] != NULL) {
-		blk_unplug(bdev_get_queue(kc->unplug[rw]));
-		kc->unplug[rw] = NULL;
-	}
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
-			   struct block_device *bdev)
-{
-	if (likely(kc->unplug[rw] == bdev))
-		return;
-	unplug(kc, rw);
-	kc->unplug[rw] = bdev;
-}
-
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,15 +354,12 @@ static int run_io_job(struct kcopyd_job *job)
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ) {
+	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-		prepare_unplug(job->kc, READ, job->source.bdev);
-	} else {
+	else {
 		if (job->num_dests > 1)
 			io_req.bi_rw |= REQ_UNPLUG;
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-		if (!(io_req.bi_rw & REQ_UNPLUG))
-			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
 	}
 
 	return r;
@@ -466,6 +431,7 @@ static void do_work(struct work_struct *work)
 {
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
+	struct blk_plug plug;
 
 	/*
 	 * The order that these are called is *very* important.
@@ -473,18 +439,12 @@ static void do_work(struct work_struct *work)
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
-	 *
-	 * Note that io_jobs add block devices to the unplug array,
-	 * this array is cleared with "unplug" calls. It is thus
-	 * forbidden to run complete_jobs after io_jobs and before
-	 * unplug because the block device could be destroyed in
-	 * job completion callback.
 	 */
+	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
-	unplug(kc, READ);
-	unplug(kc, WRITE);
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -665,8 +625,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	memset(kc->unplug, 0, sizeof(kc->unplug));
-
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
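The kcopyd conversion is the clearest statement of the new model: the client no longer remembers which block devices to unplug, it simply brackets its submission loop with an on-stack plug and lets the block layer batch and flush for it. A condensed sketch of that pattern; struct job and submit_one_job() are hypothetical stand-ins for kcopyd's job lists and process_jobs():

#include <linux/blkdev.h>

/* Sketch: batch several submissions behind one on-stack plug.
 * While the plug is active, requests accumulate on current->plug;
 * blk_finish_plug() pushes them all to the device queues at once. */
static void example_do_work(struct job *jobs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_one_job(&jobs[i]);	/* hypothetical; issues bios */
	blk_finish_plug(&plug);
}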
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
 {
 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
-	md_raid5_unplug_device(rs->md.private);
+	md_raid5_kick_device(rs->md.private);
 }
 
 /*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
 	do_reads(ms, &reads);
 	do_writes(ms, &writes);
 	do_failures(ms, &failures);
-
-	dm_table_unplug_all(ms->ti->table);
 }
 
 /*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
 	return 0;
 }
 
-void dm_table_unplug_all(struct dm_table *t)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_target_callbacks *cb;
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			blk_unplug(q);
-		else
-			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
-	}
-
-	list_for_each_entry(cb, &t->target_callbacks, list)
-		if (cb->unplug_fn)
-			cb->unplug_fn(cb);
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
 	dm_unprep_request(rq);
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (elv_queue_empty(q))
-		blk_plug_device(q);
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
 	 * number of in-flight I/Os after the queue is stopped in
 	 * dm_suspend().
 	 */
-	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto plug_and_out;
+			goto delay_and_out;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
@@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q)
 		BUG_ON(!dm_target_is_valid(ti));
 
 		if (ti->type->busy && ti->type->busy(ti))
-			goto plug_and_out;
+			goto delay_and_out;
 
 		blk_start_request(rq);
 		clone = rq->special;
@@ -1647,11 +1645,8 @@ static void dm_request_fn(struct request_queue *q)
 	BUG_ON(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
-plug_and_out:
-	if (!elv_queue_empty(q))
-		/* Some requests still remain, retry later */
-		blk_plug_device(q);
-
+delay_and_out:
+	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
 
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
 	return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
-
-	if (map) {
-		if (dm_request_based(md))
-			generic_unplug_device(q);
-
-		dm_table_unplug_all(map);
-		dm_table_put(map);
-	}
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	dm_unplug_all(md->queue);
-
 	add_wait_queue(&md->wait, &wait);
 
 	while (1) {
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
-	dm_table_unplug_all(map);
 	r = 0;
 out:
 	dm_table_put(map);
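dm's request_fn used to park a plugged queue when the target was busy and rely on a later unplug to retry; with plugging gone it asks the block layer for a timed re-run instead. A sketch of the delayed-retry idiom, where target_busy() is a hypothetical predicate and HZ / 10 is the value the hunk above picked:

#include <linux/blkdev.h>

/* Sketch: a request_fn that cannot make progress schedules itself to
 * be re-run instead of plugging the queue. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq || target_busy(rq)) {	/* target_busy(): hypothetical */
			blk_delay_queue(q, HZ / 10);	/* retry later */
			return;
		}
		blk_start_request(rq);
		/* ... hand rq to the low-level driver ... */
	}
}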
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	return maxsectors << 9;
 }
 
-static void linear_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	linear_conf_t *conf;
-	int i;
-
-	rcu_read_lock();
-	conf = rcu_dereference(mddev->private);
-
-	for (i=0; i < mddev->raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		blk_unplug(r_queue);
-	}
-	rcu_read_unlock();
-}
-
 static int linear_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -225,7 +209,6 @@ static int linear_run (mddev_t *mddev)
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
-	mddev->queue->unplug_fn = linear_unplug;
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
@@ -4812,7 +4812,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		__md_stop_writes(mddev);
 		md_stop(mddev);
 		mddev->queue->merge_bvec_fn = NULL;
-		mddev->queue->unplug_fn = NULL;
 		mddev->queue->backing_dev_info.congested_fn = NULL;
 
 		/* tell userspace to handle 'inactive' */
@@ -6669,8 +6668,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
 
 void md_unplug(mddev_t *mddev)
 {
-	if (mddev->queue)
-		blk_unplug(mddev->queue);
 	if (mddev->plug)
 		mddev->plug->unplug_fn(mddev->plug);
 }
@@ -6853,7 +6850,6 @@ void md_do_sync(mddev_t *mddev)
 		     >= mddev->resync_max - mddev->curr_resync_completed
 			    )) {
 			/* time to update curr_resync_completed */
-			md_unplug(mddev);
 			wait_event(mddev->recovery_wait,
 				   atomic_read(&mddev->recovery_active) == 0);
 			mddev->curr_resync_completed = j;
@@ -6929,7 +6925,6 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		md_unplug(mddev);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6948,8 +6943,6 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	md_unplug(mddev);
-
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	multipath_conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)
-		    && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
-	unplug_slaves(q->queuedata);
-}
-
-
 static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
 	multipath_conf_t *conf = mddev->private;
@@ -518,7 +488,6 @@ static int multipath_run (mddev_t *mddev)
 	 */
 	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
 
-	mddev->queue->unplug_fn = multipath_unplug;
 	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
 #include "raid0.h"
 #include "raid5.h"
 
-static void raid0_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev->private;
-	mdk_rdev_t **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i;
-
-	for (i=0; i < raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
-		blk_unplug(r_queue);
-	}
-}
-
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 		       mdname(mddev),
 		       (unsigned long long)smallest->sectors);
 	}
-	mddev->queue->unplug_fn = raid0_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
 #define	NR_RAID1_BIOS 256
 
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	r1bio_t *r1_bio;
 	int size = offsetof(r1bio_t, bios[pi->raid_disks]);
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
-	r1_bio = kzalloc(size, gfp_flags);
-	if (!r1_bio && pi->mddev)
-		unplug_slaves(pi->mddev);
-
-	return r1_bio;
+	return kzalloc(size, gfp_flags);
 }
 
 static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int i, j;
 
 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
-	if (!r1_bio) {
-		unplug_slaves(pi->mddev);
+	if (!r1_bio)
 		return NULL;
-	}
 
 	/*
 	 * Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 	return new_disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	unplug_slaves(mddev);
-	md_wakeup_thread(mddev->thread);
-}
-
 static int raid1_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -580,20 +540,16 @@ static int raid1_congested(void *data, int bits)
 }
 
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
 	/* Any writes that have been queued but are awaiting
 	 * bitmap updates get flushed here.
-	 * We return 1 if any requests were actually submitted.
 	 */
-	int rv = 0;
-
 	spin_lock_irq(&conf->device_lock);
 
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
-		blk_remove_plug(conf->mddev->queue);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to
 		 * disk before proceeding w/ I/O */
@@ -605,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
 			generic_make_request(bio);
 			bio = next;
 		}
-		rv = 1;
 	} else
 		spin_unlock_irq(&conf->device_lock);
-	return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+	blk_flush_plug(current);
+	md_wakeup_thread(mddev->thread);
 }
 
 /* Barriers....
@@ -640,8 +600,7 @@ static void raise_barrier(conf_t *conf)
 
 	/* Wait until no block IO is waiting */
 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-			    conf->resync_lock,
-			    raid1_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -649,8 +608,7 @@ static void raise_barrier(conf_t *conf)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid1_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -672,7 +630,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid1_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -709,7 +667,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid1_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -959,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -968,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync)
+	if (do_sync || !bitmap)
 		md_wakeup_thread(mddev->thread);
 
 	return 0;
@@ -1558,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
-	int unplug=0;
 	mdk_rdev_t *rdev;
 
 	md_check_recovery(mddev);
@@ -1566,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
-		unplug += flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1580,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
 
 		mddev = r1_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+		if (test_bit(R1BIO_IsSync, &r1_bio->state))
 			sync_request_write(mddev, r1_bio);
-			unplug = 1;
-		} else {
+		else {
 			int disk;
 
 			/* we got a read error. Maybe the drive is bad.  Maybe just
@@ -1633,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
 				bio->bi_end_io = raid1_end_read_request;
 				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r1_bio;
-				unplug = 1;
 				generic_make_request(bio);
 			}
 		}
 		cond_resched();
 	}
-	if (unplug)
-		unplug_slaves(mddev);
 }
 
 
@@ -2064,7 +2016,6 @@ static int run(mddev_t *mddev)
 
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-	mddev->queue->unplug_fn = raid1_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
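raid1's barrier code waits under a spinlock with wait_event_lock_irq(), an md-private macro at this point in history (drivers/md/md.h), whose last argument is a statement run each time around the wait loop, after the lock is dropped and before the task sleeps. Passing md_kick_device() there makes the sleeper first flush its own on-stack plug, so the I/O the barrier is waiting on actually reaches the disks, and then poke the raid1d thread. A sketch of the shape of such a wait, reusing the conf_t fields from the hunks above with an illustrative condition:

/* Sketch: the barrier-wait shape used above.  md_kick_device() runs
 * with conf->resync_lock released, right before each sleep. */
static void example_wait_for_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->barrier,	/* illustrative condition */
			    conf->resync_lock,
			    md_kick_device(conf->mddev));
	spin_unlock_irq(&conf->resync_lock);
}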
					@ -57,23 +57,16 @@
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
#define	NR_RAID10_BIOS 256
 | 
					#define	NR_RAID10_BIOS 256
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void unplug_slaves(mddev_t *mddev);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static void allow_barrier(conf_t *conf);
 | 
					static void allow_barrier(conf_t *conf);
 | 
				
			||||||
static void lower_barrier(conf_t *conf);
 | 
					static void lower_barrier(conf_t *conf);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 | 
					static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	conf_t *conf = data;
 | 
						conf_t *conf = data;
 | 
				
			||||||
	r10bio_t *r10_bio;
 | 
					 | 
				
			||||||
	int size = offsetof(struct r10bio_s, devs[conf->copies]);
 | 
						int size = offsetof(struct r10bio_s, devs[conf->copies]);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* allocate a r10bio with room for raid_disks entries in the bios array */
 | 
						/* allocate a r10bio with room for raid_disks entries in the bios array */
 | 
				
			||||||
	r10_bio = kzalloc(size, gfp_flags);
 | 
						return kzalloc(size, gfp_flags);
 | 
				
			||||||
	if (!r10_bio && conf->mddev)
 | 
					 | 
				
			||||||
		unplug_slaves(conf->mddev);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	return r10_bio;
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void r10bio_pool_free(void *r10_bio, void *data)
 | 
					static void r10bio_pool_free(void *r10_bio, void *data)
 | 
				
			||||||
| 
						 | 
					@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 | 
				
			||||||
	int nalloc;
 | 
						int nalloc;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 | 
						r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 | 
				
			||||||
	if (!r10_bio) {
 | 
						if (!r10_bio)
 | 
				
			||||||
		unplug_slaves(conf->mddev);
 | 
					 | 
				
			||||||
		return NULL;
 | 
							return NULL;
 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 | 
						if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 | 
				
			||||||
		nalloc = conf->copies; /* resync */
 | 
							nalloc = conf->copies; /* resync */
 | 
				
			||||||
| 
						 | 
					@ -597,37 +588,6 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 | 
				
			||||||
	return disk;
 | 
						return disk;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void unplug_slaves(mddev_t *mddev)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	conf_t *conf = mddev->private;
 | 
					 | 
				
			||||||
	int i;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	rcu_read_lock();
 | 
					 | 
				
			||||||
	for (i=0; i < conf->raid_disks; i++) {
 | 
					 | 
				
			||||||
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 | 
					 | 
				
			||||||
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
 | 
					 | 
				
			||||||
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
			atomic_inc(&rdev->nr_pending);
 | 
					 | 
				
			||||||
			rcu_read_unlock();
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
			blk_unplug(r_queue);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
			rdev_dec_pending(rdev, mddev);
 | 
					 | 
				
			||||||
			rcu_read_lock();
 | 
					 | 
				
			||||||
		}
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	rcu_read_unlock();
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static void raid10_unplug(struct request_queue *q)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	mddev_t *mddev = q->queuedata;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	unplug_slaves(q->queuedata);
 | 
					 | 
				
			||||||
	md_wakeup_thread(mddev->thread);
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static int raid10_congested(void *data, int bits)
 | 
					static int raid10_congested(void *data, int bits)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	mddev_t *mddev = data;
 | 
						mddev_t *mddev = data;
 | 
				
			||||||
| 
						 | 
					@ -649,20 +609,16 @@ static int raid10_congested(void *data, int bits)
 | 
				
			||||||
	return ret;
 | 
						return ret;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int flush_pending_writes(conf_t *conf)
 | 
					static void flush_pending_writes(conf_t *conf)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	/* Any writes that have been queued but are awaiting
 | 
						/* Any writes that have been queued but are awaiting
 | 
				
			||||||
	 * bitmap updates get flushed here.
 | 
						 * bitmap updates get flushed here.
 | 
				
			||||||
	 * We return 1 if any requests were actually submitted.
 | 
					 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	int rv = 0;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	spin_lock_irq(&conf->device_lock);
 | 
						spin_lock_irq(&conf->device_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (conf->pending_bio_list.head) {
 | 
						if (conf->pending_bio_list.head) {
 | 
				
			||||||
		struct bio *bio;
 | 
							struct bio *bio;
 | 
				
			||||||
		bio = bio_list_get(&conf->pending_bio_list);
 | 
							bio = bio_list_get(&conf->pending_bio_list);
 | 
				
			||||||
		blk_remove_plug(conf->mddev->queue);
 | 
					 | 
				
			||||||
		spin_unlock_irq(&conf->device_lock);
 | 
							spin_unlock_irq(&conf->device_lock);
 | 
				
			||||||
		/* flush any pending bitmap writes to disk
 | 
							/* flush any pending bitmap writes to disk
 | 
				
			||||||
		 * before proceeding w/ I/O */
 | 
							 * before proceeding w/ I/O */
 | 
				
			||||||
| 
						 | 
					@ -674,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
 | 
				
			||||||
			generic_make_request(bio);
 | 
								generic_make_request(bio);
 | 
				
			||||||
			bio = next;
 | 
								bio = next;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		rv = 1;
 | 
					 | 
				
			||||||
	} else
 | 
						} else
 | 
				
			||||||
		spin_unlock_irq(&conf->device_lock);
 | 
							spin_unlock_irq(&conf->device_lock);
 | 
				
			||||||
	return rv;
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static void md_kick_device(mddev_t *mddev)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						blk_flush_plug(current);
 | 
				
			||||||
 | 
						md_wakeup_thread(mddev->thread);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/* Barriers....
 | 
					/* Barriers....
 | 
				
			||||||
 * Sometimes we need to suspend IO while we do something else,
 | 
					 * Sometimes we need to suspend IO while we do something else,
 | 
				
			||||||
 * either some resync/recovery, or reconfigure the array.
 | 
					 * either some resync/recovery, or reconfigure the array.
 | 
				
			||||||
| 
						 | 
					@ -708,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Wait until no block IO is waiting (unless 'force') */
 | 
						/* Wait until no block IO is waiting (unless 'force') */
 | 
				
			||||||
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 | 
						wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 | 
				
			||||||
			    conf->resync_lock,
 | 
								    conf->resync_lock, md_kick_device(conf->mddev));
 | 
				
			||||||
			    raid10_unplug(conf->mddev->queue));
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* block any new IO from starting */
 | 
						/* block any new IO from starting */
 | 
				
			||||||
	conf->barrier++;
 | 
						conf->barrier++;
 | 
				
			||||||
@@ -717,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -739,7 +698,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid10_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -776,7 +735,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid10_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }
 
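freeze_array() passes a GCC statement expression, the ({ ... }) block, so that two actions run on every wait iteration; a statement expression evaluates to its last statement, which is what lets it sit in an expression slot of the macro. A userspace-compilable illustration (plain gcc/clang, nothing kernel-specific, all names hypothetical):

#include <stdio.h>

static int flushes;

static int flush_pending(void) { return ++flushes; }
static void kick(void)         { printf("kick after %d flushes\n", flushes); }

int main(void)
{
	/* The ({ ... }) block runs both calls and yields `flushes`. */
	int rv = ({ flush_pending(); kick(); flushes; });

	printf("rv = %d\n", rv);
	return rv == 1 ? 0 : 1;
}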
@@ -971,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -988,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync)
+	if (do_sync || !mddev->bitmap)
 		md_wakeup_thread(mddev->thread);
 
 	return 0;
@@ -1681,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
-	int unplug=0;
 	mdk_rdev_t *rdev;
 
 	md_check_recovery(mddev);
@@ -1689,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
-		unplug += flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1703,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
 
 		mddev = r10_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+		if (test_bit(R10BIO_IsSync, &r10_bio->state))
 			sync_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else 	if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
 			recovery_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else {
+		else {
 			int mirror;
 			/* we got a read error. Maybe the drive is bad.  Maybe just
 			 * the block and we can fix it.
@@ -1756,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
 				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = raid10_end_read_request;
-				unplug = 1;
 				generic_make_request(bio);
 			}
 		}
 		cond_resched();
 	}
-	if (unplug)
-		unplug_slaves(mddev);
 }
 
 
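With the unplug accumulator gone, raid10d reduces to the plain retry-list daemon pattern: pop one entry under the lock, handle it unlocked, repeat. A self-contained sketch of that list discipline (hypothetical types; as in raid10d, the oldest entry is popped from the tail, i.e. head->prev):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical retry element, standing in for r10bio_t. */
struct retry_item {
	struct list_head list;
};

static struct retry_item *pop_retry(struct list_head *head, spinlock_t *lock)
{
	struct retry_item *item = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(head)) {
		/* oldest entry sits at the tail of the retry list */
		item = list_entry(head->prev, struct retry_item, list);
		list_del(&item->list);
	}
	spin_unlock_irqrestore(lock, flags);
	return item;		/* handle outside the lock */
}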
@@ -2376,7 +2328,6 @@ static int run(mddev_t *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
 
-	mddev->queue->unplug_fn = raid10_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
 	return 0;
 }
 
-static void unplug_slaves(mddev_t *mddev);
-
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
 		  int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 					     < (conf->max_nr_stripes *3/4)
 					     || !conf->inactive_blocked),
 					    conf->device_lock,
-					    md_raid5_unplug_device(conf)
-					);
+					    md_raid5_kick_device(conf));
 				conf->inactive_blocked = 0;
 			} else
 				init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
 				    conf->device_lock,
-				    unplug_slaves(conf->mddev)
-			);
+				    blk_flush_plug(current));
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
 	}
 }
 
-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
 {
-	raid5_conf_t *conf = mddev->private;
-	int i;
-	int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
-	rcu_read_lock();
-	for (i = 0; i < devs; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&conf->device_lock, flags);
-
-	if (plugger_remove_plug(&conf->plug)) {
-		conf->seq_flush++;
+	blk_flush_plug(current);
 	raid5_activate_delayed(conf);
-	}
 	md_wakeup_thread(conf->mddev->thread);
-
-	spin_unlock_irqrestore(&conf->device_lock, flags);
-
-	unplug_slaves(conf->mddev);
 }
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);
 
 static void raid5_unplug(struct plug_handle *plug)
 {
 	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-	md_raid5_unplug_device(conf);
-}
 
-static void raid5_unplug_queue(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	md_raid5_unplug_device(mddev->private);
+	md_raid5_kick_device(conf);
 }
 
 int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 				 * add failed due to overlap.  Flush everything
 				 * and wait a while
 				 */
-				md_raid5_unplug_device(conf);
+				md_raid5_kick_device(conf);
 				release_stripe(sh);
 				schedule();
 				goto retry;
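The retry path above is the canonical use of the kick helper: before calling schedule() to wait for stripes to drain, the task flushes its own plug so it is not sleeping on bios it still holds. A minimal sketch of that rule (hypothetical wait loop, standard wait-queue API of the era):

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Flush our own plugged IO before blocking on its completion;
 * otherwise we could wait forever on bios that were never issued. */
static void wait_for_room(wait_queue_head_t *wq, bool (*have_room)(void))
{
	DEFINE_WAIT(w);

	for (;;) {
		prepare_to_wait(wq, &w, TASK_UNINTERRUPTIBLE);
		if (have_room())
			break;
		blk_flush_plug(current);
		schedule();
	}
	finish_wait(wq, &w);
}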
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
-		unplug_slaves(mddev);
 
 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 			end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
 	spin_unlock_irq(&conf->device_lock);
 
 	async_tx_issue_pending_all();
-	unplug_slaves(mddev);
 
 	pr_debug("--- raid5d inactive\n");
 }
@@ -5205,7 +5160,6 @@ static int run(mddev_t *mddev)
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 		mddev->queue->queue_lock = &conf->device_lock;
-		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
 }
 
 extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern void md_raid5_kick_device(raid5_conf_t *conf);
 extern int raid5_set_cache_size(mddev_t *mddev, int size);
 #endif
@@ -895,11 +895,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 {
 	struct request *req;
 
-	while (!blk_queue_plugged(q)) {
-		req = blk_peek_request(q);
-		if (!req)
-			break;
-
+	while ((req = blk_peek_request(q)) != NULL) {
 		if (req->cmd_type == REQ_TYPE_FS) {
 			struct i2o_block_delayed_request *dreq;
 			struct i2o_block_request *ireq = req->special;
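blk_queue_plugged() is gone, so driver request functions lose the plug test and simply loop until blk_peek_request() returns NULL. A self-contained sketch of the resulting idiom (toy handler with hypothetical names; blk_peek_request() and blk_start_request() are the real APIs of this era, and request_fn runs with the queue lock held):

#include <linux/blkdev.h>

/* Toy request_fn: peek, start (dequeue), then "complete" each request. */
static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	/* No plugged state to test any more: an empty queue is simply
	 * an empty queue, and blk_peek_request() returns NULL. */
	while ((req = blk_peek_request(q)) != NULL) {
		blk_start_request(req);
		__blk_end_request_all(req, 0);	/* toy: succeed immediately */
	}
}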
@@ -55,7 +55,6 @@ static int mmc_queue_thread(void *d)
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!blk_queue_plugged(q))
-			req = blk_fetch_request(q);
+		req = blk_fetch_request(q);
 		mq->req = req;
 		spin_unlock_irq(q->queue_lock);
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		return;
 	}
 	/* Now we try to fetch requests from the request queue */
-	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+	while ((req = blk_peek_request(queue))) {
 		if (basedev->features & DASD_FEATURE_READONLY &&
 		    rq_data_dir(req) == WRITE) {
 			DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {
 
 	spin_lock_irq(&device->blk_data.request_queue_lock);
 	while (
-		!blk_queue_plugged(queue) &&
 		blk_peek_request(queue) &&
 		nr_queued < TAPEBLOCK_MIN_REQUEUE
 	) {
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
 	if (!get_device(dev))
 		return;
 
-	while (!blk_queue_plugged(q)) {
+	while (1) {
 		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
 		    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
 			break;
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
 	int ret;
 	int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 
-	while (!blk_queue_plugged(q)) {
-		req = blk_fetch_request(q);
-		if (!req)
-			break;
-
+	while ((req = blk_fetch_request(q)) != NULL) {
 		spin_unlock_irq(q->queue_lock);
 
 		handler = to_sas_internal(shost->transportt)->f->smp_handler;
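The SMP handler's loop above shows the companion idiom: blk_fetch_request() is peek plus start in one call, so "fetch until NULL" fully drains the queue. Sketch with a hypothetical handle callback; like sas_smp_request(), it drops the queue lock around work that may sleep:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Request functions are entered with queue_lock held; each fetched
 * request is already dequeued and must be completed by the handler. */
static void drain_queue(struct request_queue *q,
			void (*handle)(struct request *))
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		spin_unlock_irq(q->queue_lock);
		handle(req);			/* may sleep */
		spin_lock_irq(q->queue_lock);
	}
}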
@@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task)
 {
 	struct se_device *dev = task->task_se_cmd->se_dev;
 	struct iblock_req *req = IBLOCK_REQ(task);
-	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
-	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
 	struct bio *bio = req->ib_bio, *nbio = NULL;
+	struct blk_plug plug;
 	int rw;
 
 	if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task)
 		rw = READ;
 	}
 
+	blk_start_plug(&plug);
 	while (bio) {
 		nbio = bio->bi_next;
 		bio->bi_next = NULL;
@@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task)
 		submit_bio(rw, bio);
 		bio = nbio;
 	}
+	blk_finish_plug(&plug);
 
-	if (q->unplug_fn)
-		q->unplug_fn(q);
 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
 
@@ -73,7 +73,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations adfs_aops = {
 	.readpage	= adfs_readpage,
 	.writepage	= adfs_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= adfs_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= _adfs_bmap
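From here on the patch repeats one mechanical change across filesystems: the .sync_page slot (almost always block_sync_page) disappears from each address_space_operations table, because waiters no longer poke the backing device per page. What a minimal post-change table can look like, with hypothetical "myfs" names; block_read_full_page() and map_bh() are real buffer-layer helpers, and the toy get_block is for illustration only:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Toy get_block: identity-map the block number (illustration only). */
static int myfs_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh, int create)
{
	map_bh(bh, inode->i_sb, block);
	return 0;
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	/* no .sync_page: the submitter's on-stack plug handles batching */
};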
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations affs_aops = {
 	.readpage = affs_readpage,
 	.writepage = affs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = affs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = _affs_bmap
@@ -786,7 +785,6 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 const struct address_space_operations affs_aops_ofs = {
 	.readpage = affs_readpage_ofs,
 	//.writepage = affs_writepage_ofs,
-	//.sync_page = affs_sync_page_ofs,
 	.write_begin = affs_write_begin_ofs,
 	.write_end = affs_write_end_ofs
 };

4 fs/aio.c

@@ -1550,9 +1550,11 @@ static void aio_batch_free(struct hlist_head *batch_hash)
 	struct hlist_node *pos, *n;
 	int i;
 
+	/*
+	 * TODO: kill this
+	 */
 	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
 		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
-			blk_run_address_space(abe->mapping);
 			iput(abe->mapping->host);
 			hlist_del(&abe->list);
 			mempool_free(abe, abe_pool);
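aio_batch_free() keeps only the hash teardown (now flagged TODO), which is the standard safe-iteration idiom: hlist_for_each_entry_safe() caches the next node so the current one can be deleted mid-walk. A self-contained sketch using the era's four-cursor signature, with a hypothetical entry type:

#include <linux/list.h>
#include <linux/slab.h>

struct batch_entry {
	struct hlist_node list;
};

static void free_all(struct hlist_head *head)
{
	struct batch_entry *e;
	struct hlist_node *pos, *n;

	/* "n" holds the next node, so hlist_del() on "e" is safe here. */
	hlist_for_each_entry_safe(e, pos, n, head, list) {
		hlist_del(&e->list);
		kfree(e);
	}
}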
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
 
 static const struct address_space_operations befs_aops = {
 	.readpage	= befs_readpage,
-	.sync_page	= block_sync_page,
 	.bmap		= befs_bmap,
 };
 
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations bfs_aops = {
 	.readpage	= bfs_readpage,
 	.writepage	= bfs_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= bfs_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= bfs_bmap,
@@ -1520,7 +1520,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 static const struct address_space_operations def_blk_aops = {
 	.readpage	= blkdev_readpage,
 	.writepage	= blkdev_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= blkdev_write_begin,
 	.write_end	= blkdev_write_end,
 	.writepages	= generic_writepages,
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
 	.writepages	= btree_writepages,
 	.releasepage	= btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
-	.sync_page	= block_sync_page,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btree_migratepage,
 #endif
@@ -1330,82 +1329,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 	return ret;
 }
 
-/*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-	struct btrfs_device *device;
-	struct btrfs_fs_info *info;
-
-	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
-	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
-		if (!device->bdev)
-			continue;
-
-		bdi = blk_get_backing_dev_info(device->bdev);
-		if (bdi->unplug_io_fn)
-			bdi->unplug_io_fn(bdi, page);
-	}
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-	struct inode *inode;
-	struct extent_map_tree *em_tree;
-	struct extent_map *em;
-	struct address_space *mapping;
-	u64 offset;
-
-	/* the generic O_DIRECT read code does this */
-	if (1 || !page) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	/*
-	 * page->mapping may change at any time.  Get a consistent copy
-	 * and use that for everything below
-	 */
-	smp_mb();
-	mapping = page->mapping;
-	if (!mapping)
-		return;
-
-	inode = mapping->host;
-
-	/*
-	 * don't do the expensive searching for a small number of
-	 * devices
-	 */
-	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	offset = page_offset(page);
-
-	em_tree = &BTRFS_I(inode)->extent_tree;
-	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-	read_unlock(&em_tree->lock);
-	if (!em) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-		free_extent_map(em);
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-	offset = offset - em->start;
-	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
-			  em->block_start + offset, page);
-	free_extent_map(em);
-}
-
 /*
  * If this fails, caller must call bdi_destroy() to get rid of the
  * bdi again.
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 		return err;
 
 	bdi->ra_pages	= default_backing_dev_info.ra_pages;
-	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
-	bdi->unplug_io_data	= info;
 	bdi->congested_fn	= btrfs_congested_fn;
 	bdi->congested_data	= info;
 	return 0;
@@ -7218,7 +7218,6 @@ static const struct address_space_operations btrfs_aops = {
 	.writepage	= btrfs_writepage,
 	.writepages	= btrfs_writepages,
 	.readpages	= btrfs_readpages,
-	.sync_page	= block_sync_page,
 	.direct_IO	= btrfs_direct_IO,
 	.invalidatepage = btrfs_invalidatepage,
 	.releasepage	= btrfs_releasepage,
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run;
-	unsigned long num_sync_run;
 	unsigned long batch_run = 0;
 	unsigned long limit;
 	unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
-	/* we want to make sure that every time we switch from the sync
-	 * list to the normal list, we unplug
-	 */
-	num_sync_run = 0;
-
 loop:
 	spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 
 	spin_unlock(&device->io_lock);
 
-	/*
-	 * if we're doing the regular priority list, make sure we unplug
-	 * for any high prio bios we've sent down
-	 */
-	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-
 	while (pending) {
 
 		rmb();
@@ -259,19 +244,11 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-		if (cur->bi_rw & REQ_SYNC)
-			num_sync_run++;
-
 		submit_bio(cur->bi_rw, cur);
 		num_run++;
 		batch_run++;
-		if (need_resched()) {
-			if (num_sync_run) {
-				blk_run_backing_dev(bdi, NULL);
-				num_sync_run = 0;
-			}
+		if (need_resched())
 			cond_resched();
-		}
 
 		/*
 		 * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 				 * against it before looping
 				 */
 				last_waited = ioc->last_waited;
-				if (need_resched()) {
-					if (num_sync_run) {
-						blk_run_backing_dev(bdi, NULL);
-						num_sync_run = 0;
-					}
+				if (need_resched())
 					cond_resched();
-				}
 				continue;
 			}
 			spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 		}
 	}
 
-	if (num_sync_run) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-	/*
-	 * IO has already been through a long path to get here.  Checksumming,
-	 * async helper threads, perhaps compression.  We've done a pretty
-	 * good job of collecting a batch of IO and should just unplug
-	 * the device right away.
-	 *
-	 * This will help anyone who is waiting on the IO, they might have
-	 * already unplugged, but managed to do so before the bio they
-	 * cared about found its way down here.
-	 */
-	blk_run_backing_dev(bdi, NULL);
-
 	cond_resched();
 	if (again)
 		goto loop;
@@ -2948,7 +2904,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
 			     struct btrfs_multi_bio **multi_ret,
-			     int mirror_num, struct page *unplug_page)
+			     int mirror_num)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -2980,11 +2936,6 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	em = lookup_extent_mapping(em_tree, logical, *length);
 	read_unlock(&em_tree->lock);
 
-	if (!em && unplug_page) {
-		kfree(multi);
-		return 0;
-	}
-
 	if (!em) {
 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
 		       (unsigned long long)logical,
@@ -3040,13 +2991,13 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret && !unplug_page)
+	if (!multi_ret)
 		goto out;
 
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -3068,7 +3019,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
@@ -3088,22 +3039,10 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	BUG_ON(stripe_index >= map->num_stripes);
 
 	for (i = 0; i < num_stripes; i++) {
-		if (unplug_page) {
-			struct btrfs_device *device;
-			struct backing_dev_info *bdi;
-
-			device = map->stripes[stripe_index].dev;
-			if (device->bdev) {
-				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn)
-					bdi->unplug_io_fn(bdi, unplug_page);
-			}
-		} else {
-			multi->stripes[i].physical =
-				map->stripes[stripe_index].physical +
-				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
-		}
+		multi->stripes[i].physical =
+			map->stripes[stripe_index].physical +
+			stripe_offset + stripe_nr * map->stripe_len;
+		multi->stripes[i].dev = map->stripes[stripe_index].dev;
 		stripe_index++;
 	}
 	if (multi_ret) {
@@ -3121,7 +3060,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		      struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
 	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-				 mirror_num, NULL);
+				 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3189,14 +3128,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-		      u64 logical, struct page *page)
-{
-	u64 length = PAGE_CACHE_SIZE;
-	return __btrfs_map_block(map_tree, READ, logical, &length,
-				 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
 	struct btrfs_multi_bio *multi = bio->bi_private;
31 fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
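The rename from sync_buffer to sleep_on_buffer is honest about what remains: the wait_on_bit() action callback no longer kicks any queue, it only schedules. These callbacks (in the pre-3.17 signature used here) receive the word being waited on and return 0 to keep waiting. A self-contained sketch:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/buffer_head.h>

/* Action callback: runs each time the bit is still set; returning
 * nonzero would abort the wait (used by interruptible variants). */
static int my_bit_wait(void *word)
{
	io_schedule();
	return 0;
}

static void wait_buffer_unlocked(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, my_bit_wait, TASK_UNINTERRUPTIBLE);
}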
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -3138,17 +3126,6 @@ int try_to_free_buffers(struct page *page)
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
 	return rc;
 }
 
-/* static void cifs_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-	struct inode *inode;
-	unsigned long index = page->index;
-	unsigned int rpages = 0;
-	int rc = 0;
-
-	cFYI(1, "sync page %p", page);
-	mapping = page->mapping;
-	if (!mapping)
-		return 0;
-	inode = mapping->host;
-	if (!inode)
-		return; */
-
-/*	fill in rpages then
-	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/*	cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
-	if (rc < 0)
-		return rc;
-	return 0;
-#endif
-} */
-
 /*
  * As file closes, flush all cached write data for this inode checking
  * for write behind errors.
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.releasepage = cifs_release_page,
 	.invalidatepage = cifs_invalidate_page,
-	/* .sync_page = cifs_sync_page, */
 	/* .direct_IO = */
 };
 
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.releasepage = cifs_release_page,
 	.invalidatepage = cifs_invalidate_page,
-	/* .sync_page = cifs_sync_page, */
 	/* .direct_IO = */
 };
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	    ((rw & READ) || (dio->result == dio->size)))
 		ret = -EIOCBQUEUED;
 
-	if (ret != -EIOCBQUEUED) {
-		/* All IO is now issued, send it on its way */
-		blk_run_address_space(inode->i_mapping);
+	if (ret != -EIOCBQUEUED)
 		dio_await_completion(dio);
-	}
 
 	/*
 	 * Sync will always be dropping the final ref and completing the
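The direct-IO change leans on the same invariant as the md waits: by the end of this series a task that blocks has its plug flushed for it (the scheduler checks for pending plugged IO on context switch), so dio_await_completion() cannot be stuck behind bios still parked in the submitter's plug. A hedged sketch of a belt-and-suspenders version of that check; blk_needs_flush_plug() is assumed to be available, as on kernels with this plugging rework:

#include <linux/blkdev.h>
#include <linux/sched.h>

/* Illustrative only: flush our plug explicitly before a long wait.
 * In practice schedule() performs this for a blocking task. */
static inline void flush_my_plug_before_waiting(void)
{
	if (blk_needs_flush_plug(current))
		blk_flush_plug(current);
}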
fs/efs/inode.c
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
 }
 static const struct address_space_operations efs_aops = {
 	.readpage = efs_readpage,
-	.sync_page = block_sync_page,
 	.bmap = _efs_bmap
 };
 
fs/exofs/inode.c
@@ -795,7 +795,6 @@ const struct address_space_operations exofs_aops = {
 	.direct_IO	= NULL, /* TODO: Should be trivial to do */
 
 	/* With these NULL has special meaning or default is not exported */
-	.sync_page	= NULL,
 	.get_xip_mem	= NULL,
 	.migratepage	= NULL,
 	.launder_page	= NULL,
fs/ext2/inode.c
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
 	.readpage		= ext2_readpage,
 	.readpages		= ext2_readpages,
 	.writepage		= ext2_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext2_write_begin,
 	.write_end		= ext2_write_end,
 	.bmap			= ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
 	.readpage		= ext2_readpage,
 	.readpages		= ext2_readpages,
 	.writepage		= ext2_nobh_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext2_nobh_write_begin,
 	.write_end		= nobh_write_end,
 	.bmap			= ext2_bmap,
fs/ext3/inode.c
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
 	.readpage		= ext3_readpage,
 	.readpages		= ext3_readpages,
 	.writepage		= ext3_ordered_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext3_write_begin,
 	.write_end		= ext3_ordered_write_end,
 	.bmap			= ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
 	.readpage		= ext3_readpage,
 	.readpages		= ext3_readpages,
 	.writepage		= ext3_writeback_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext3_write_begin,
 	.write_end		= ext3_writeback_write_end,
 	.bmap			= ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
 	.readpage		= ext3_readpage,
 	.readpages		= ext3_readpages,
 	.writepage		= ext3_journalled_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext3_write_begin,
 	.write_end		= ext3_journalled_write_end,
 	.set_page_dirty		= ext3_journalled_set_page_dirty,
fs/ext4/inode.c
@@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = {
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_ordered_write_end,
 	.bmap			= ext4_bmap,
@@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = {
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_writeback_write_end,
 	.bmap			= ext4_bmap,
@@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = {
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_journalled_write_end,
 	.set_page_dirty		= ext4_journalled_set_page_dirty,
@@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = {
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
 	.writepages		= ext4_da_writepages,
-	.sync_page		= block_sync_page,
 	.write_begin		= ext4_da_write_begin,
 	.write_end		= ext4_da_write_end,
 	.bmap			= ext4_bmap,
fs/fat/inode.c
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
 	.readpages	= fat_readpages,
 	.writepage	= fat_writepage,
 	.writepages	= fat_writepages,
-	.sync_page	= block_sync_page,
 	.write_begin	= fat_write_begin,
 	.write_end	= fat_write_end,
 	.direct_IO	= fat_direct_IO,
fs/freevxfs/vxfs_subr.c
@@ -44,7 +44,6 @@ static sector_t		vxfs_bmap(struct address_space *, sector_t);
 const struct address_space_operations vxfs_aops = {
 	.readpage =		vxfs_readpage,
 	.bmap =			vxfs_bmap,
-	.sync_page =		block_sync_page,
 };
 
 inline void
fs/fuse/inode.c
@@ -868,7 +868,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
 
 	fc->bdi.name = "fuse";
 	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	fc->bdi.unplug_io_fn = default_unplug_io_fn;
 	/* fuse does it's own writeback accounting */
 	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
 
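Note: struct backing_dev_info no longer carries an unplug_io_fn, so a
bdi now only describes readahead and writeback-accounting behaviour.
A hedged sketch of the slimmed-down setup (the "example" name is
invented; the fields mirror the fuse code above):

static int example_bdi_setup(struct backing_dev_info *bdi)
{
	bdi->name = "example";
	bdi->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	/* this driver does its own writeback accounting */
	bdi->capabilities = BDI_CAP_NO_ACCT_WB;
	return bdi_init(bdi);	/* no unplug callback left to wire up */
}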
fs/gfs2/aops.c
@@ -1116,7 +1116,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
 	.writepages = gfs2_writeback_writepages,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
-	.sync_page = block_sync_page,
 	.write_begin = gfs2_write_begin,
 	.write_end = gfs2_write_end,
 	.bmap = gfs2_bmap,
@@ -1132,7 +1131,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
 	.writepage = gfs2_ordered_writepage,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
-	.sync_page = block_sync_page,
 	.write_begin = gfs2_write_begin,
 	.write_end = gfs2_write_end,
 	.set_page_dirty = gfs2_set_page_dirty,
@@ -1150,7 +1148,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
 	.writepages = gfs2_jdata_writepages,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
-	.sync_page = block_sync_page,
 	.write_begin = gfs2_write_begin,
 	.write_end = gfs2_write_end,
 	.set_page_dirty = gfs2_set_page_dirty,
fs/gfs2/meta_io.c
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 const struct address_space_operations gfs2_meta_aops = {
 	.writepage = gfs2_aspace_writepage,
 	.releasepage = gfs2_releasepage,
-	.sync_page = block_sync_page,
 };
 
 /**
fs/hfs/inode.c
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
 const struct address_space_operations hfs_btree_aops = {
 	.readpage	= hfs_readpage,
 	.writepage	= hfs_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= hfs_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= hfs_bmap,
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
 const struct address_space_operations hfs_aops = {
 	.readpage	= hfs_readpage,
 	.writepage	= hfs_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= hfs_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= hfs_bmap,
fs/hfsplus/inode.c
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
 const struct address_space_operations hfsplus_btree_aops = {
 	.readpage	= hfsplus_readpage,
 	.writepage	= hfsplus_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= hfsplus_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= hfsplus_bmap,
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
 const struct address_space_operations hfsplus_aops = {
 	.readpage	= hfsplus_readpage,
 	.writepage	= hfsplus_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= hfsplus_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= hfsplus_bmap,
fs/hpfs/file.c
@@ -120,7 +120,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations hpfs_aops = {
 	.readpage = hpfs_readpage,
 	.writepage = hpfs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = hpfs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = _hpfs_bmap
fs/isofs/inode.c
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
 
 static const struct address_space_operations isofs_aops = {
 	.readpage = isofs_readpage,
-	.sync_page = block_sync_page,
 	.bmap = _isofs_bmap
 };
 
fs/jfs/inode.c
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
 	.readpages	= jfs_readpages,
 	.writepage	= jfs_writepage,
 	.writepages	= jfs_writepages,
-	.sync_page	= block_sync_page,
 	.write_begin	= jfs_write_begin,
 	.write_end	= nobh_write_end,
 	.bmap		= jfs_bmap,
fs/jfs/jfs_metapage.c
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
 const struct address_space_operations jfs_metapage_aops = {
 	.readpage	= metapage_readpage,
 	.writepage	= metapage_writepage,
-	.sync_page	= block_sync_page,
 	.releasepage	= metapage_releasepage,
 	.invalidatepage	= metapage_invalidatepage,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
fs/logfs/dev_bdev.c
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
 	bio.bi_end_io = request_complete;
 
 	submit_bio(rw, &bio);
-	generic_unplug_device(bdev_get_queue(bdev));
 	wait_for_completion(&complete);
 	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
 }
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
 	}
 	len = PAGE_ALIGN(len);
 	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
-	generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
 }
 
 
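Note: both logfs call sites chased submit_bio() with
generic_unplug_device() to force dispatch. With the per-queue plug
removed, submit_bio() hands the request to the driver without needing
a kick, so synchronous I/O reduces to submit-and-wait. A sketch of
that pattern (the helper and its end_io callback are invented for
illustration; the bio is assumed to be set up by the caller):

#include <linux/bio.h>
#include <linux/completion.h>

static void sync_bio_end_io(struct bio *bio, int err)
{
	complete(bio->bi_private);	/* wake the submitter */
}

/* Submit one bio and wait for it, with no unplug call in sight. */
static int sync_bio(int rw, struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_private = &done;
	bio->bi_end_io = sync_bio_end_io;
	submit_bio(rw, bio);
	wait_for_completion(&done);
	return test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
}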
fs/minix/inode.c
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations minix_aops = {
 	.readpage = minix_readpage,
 	.writepage = minix_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = minix_write_begin,
 	.write_end = generic_write_end,
 	.bmap = minix_bmap
fs/nilfs2/btnode.c
@@ -40,14 +40,10 @@ void nilfs_btnode_cache_init_once(struct address_space *btnc)
 	nilfs_mapping_init_once(btnc);
 }
 
-static const struct address_space_operations def_btnode_aops = {
-	.sync_page		= block_sync_page,
-};
-
 void nilfs_btnode_cache_init(struct address_space *btnc,
 			     struct backing_dev_info *bdi)
 {
-	nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
+	nilfs_mapping_init(btnc, bdi);
 }
 
 void nilfs_btnode_cache_clear(struct address_space *btnc)
fs/nilfs2/gcinode.c
@@ -49,7 +49,6 @@
 #include "ifile.h"
 
 static const struct address_space_operations def_gcinode_aops = {
-	.sync_page		= block_sync_page,
 };
 
 /*
fs/nilfs2/inode.c
@@ -262,7 +262,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 const struct address_space_operations nilfs_aops = {
 	.writepage		= nilfs_writepage,
 	.readpage		= nilfs_readpage,
-	.sync_page		= block_sync_page,
 	.writepages		= nilfs_writepages,
 	.set_page_dirty		= nilfs_set_page_dirty,
 	.readpages		= nilfs_readpages,
fs/nilfs2/mdt.c
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 
 static const struct address_space_operations def_mdt_aops = {
 	.writepage		= nilfs_mdt_write_page,
-	.sync_page		= block_sync_page,
 };
 
 static const struct inode_operations def_mdt_iops;
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
 	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
 }
 
-static const struct address_space_operations shadow_map_aops = {
-	.sync_page		= block_sync_page,
-};
-
 /**
  * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
  * @inode: inode of the metadata file
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
 
 	INIT_LIST_HEAD(&shadow->frozen_buffers);
 	nilfs_mapping_init_once(&shadow->frozen_data);
-	nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+	nilfs_mapping_init(&shadow->frozen_data, bdi);
 	nilfs_mapping_init_once(&shadow->frozen_btnodes);
-	nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+	nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
 	mi->mi_shadow = shadow;
 	return 0;
 }
fs/nilfs2/page.c
@@ -506,15 +506,14 @@ void nilfs_mapping_init_once(struct address_space *mapping)
 }
 
 void nilfs_mapping_init(struct address_space *mapping,
-			struct backing_dev_info *bdi,
-			const struct address_space_operations *aops)
+			struct backing_dev_info *bdi)
 {
 	mapping->host = NULL;
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	mapping->assoc_mapping = NULL;
 	mapping->backing_dev_info = bdi;
-	mapping->a_ops = aops;
+	mapping->a_ops = NULL;
 }
 
 /*
fs/nilfs2/page.h
@@ -63,8 +63,7 @@ void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
 void nilfs_mapping_init_once(struct address_space *mapping);
 void nilfs_mapping_init(struct address_space *mapping,
-			struct backing_dev_info *bdi,
-			const struct address_space_operations *aops);
+			struct backing_dev_info *bdi);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
 					    sector_t start_blk,
fs/ntfs/aops.c
@@ -1543,8 +1543,6 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
  */
 const struct address_space_operations ntfs_aops = {
 	.readpage	= ntfs_readpage,	/* Fill page with data. */
-	.sync_page	= block_sync_page,	/* Currently, just unplugs the
-						   disk request queue. */
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
 #endif /* NTFS_RW */
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
  */
 const struct address_space_operations ntfs_mst_aops = {
 	.readpage	= ntfs_readpage,	/* Fill page with data. */
-	.sync_page	= block_sync_page,	/* Currently, just unplugs the
-						   disk request queue. */
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
 	.set_page_dirty	= __set_page_dirty_nobuffers,	/* Set the page dirty
fs/ntfs/compress.c
@@ -698,8 +698,7 @@ int ntfs_read_compressed_block(struct page *page)
 					"uptodate! Unplugging the disk queue "
 					"and rescheduling.");
 			get_bh(tbh);
-			blk_run_address_space(mapping);
-			schedule();
+			io_schedule();
 			put_bh(tbh);
 			if (unlikely(!buffer_uptodate(tbh)))
 				goto read_err;
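Note: the old pair "unplug the queue, then schedule()" collapses into a
single io_schedule(), which accounts the sleep as iowait and flushes
the caller's plug when it blocks. An equivalent way to wait that needs
no manual kicking at all, sketched with the stock buffer-head helper
(using wait_on_buffer() here is this note's suggestion, not what the
patch itself does):

#include <linux/buffer_head.h>

/* Wait for a single buffer head read; wait_on_buffer() sleeps via
 * io_schedule(), so any plugged I/O is submitted as a side effect. */
static int wait_bh_uptodate(struct buffer_head *bh)
{
	get_bh(bh);		/* keep the bh alive across the sleep */
	wait_on_buffer(bh);
	put_bh(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}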
fs/ocfs2/aops.c
@@ -2043,7 +2043,6 @@ const struct address_space_operations ocfs2_aops = {
 	.write_begin		= ocfs2_write_begin,
 	.write_end		= ocfs2_write_end,
 	.bmap			= ocfs2_bmap,
-	.sync_page		= block_sync_page,
 	.direct_IO		= ocfs2_direct_IO,
 	.invalidatepage		= ocfs2_invalidatepage,
 	.releasepage		= ocfs2_releasepage,
fs/ocfs2/cluster/heartbeat.c
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
 static void o2hb_wait_on_io(struct o2hb_region *reg,
 			    struct o2hb_bio_wait_ctxt *wc)
 {
-	struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
-
-	blk_run_address_space(mapping);
 	o2hb_bio_wait_dec(wc, 1);
-
 	wait_for_completion(&wc->wc_io_complete);
 }
 
fs/omfs/file.c
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = {
 	.readpages = omfs_readpages,
 	.writepage = omfs_writepage,
 	.writepages = omfs_writepages,
-	.sync_page = block_sync_page,
 	.write_begin = omfs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = omfs_bmap,
fs/qnx4/inode.c
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations qnx4_aops = {
 	.readpage	= qnx4_readpage,
 	.writepage	= qnx4_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= qnx4_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= qnx4_bmap
fs/reiserfs/inode.c
@@ -3212,7 +3212,6 @@ const struct address_space_operations reiserfs_address_space_operations = {
 	.readpages = reiserfs_readpages,
 	.releasepage = reiserfs_releasepage,
 	.invalidatepage = reiserfs_invalidatepage,
-	.sync_page = block_sync_page,
 	.write_begin = reiserfs_write_begin,
 	.write_end = reiserfs_write_end,
 	.bmap = reiserfs_aop_bmap,
fs/sysv/itree.c
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations sysv_aops = {
 	.readpage = sysv_readpage,
 	.writepage = sysv_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = sysv_write_begin,
 	.write_end = generic_write_end,
 	.bmap = sysv_bmap
fs/ubifs/super.c
@@ -1979,7 +1979,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	 */
 	c->bdi.name = "ubifs",
 	c->bdi.capabilities = BDI_CAP_MAP_COPY;
-	c->bdi.unplug_io_fn = default_unplug_io_fn;
 	err  = bdi_init(&c->bdi);
 	if (err)
 		goto out_close;
fs/udf/file.c
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file,
 const struct address_space_operations udf_adinicb_aops = {
 	.readpage	= udf_adinicb_readpage,
 	.writepage	= udf_adinicb_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin = simple_write_begin,
 	.write_end = udf_adinicb_write_end,
 };
fs/udf/inode.c
@@ -133,7 +133,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations udf_aops = {
 	.readpage	= udf_readpage,
 	.writepage	= udf_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin		= udf_write_begin,
 	.write_end		= generic_write_end,
 	.bmap		= udf_bmap,
fs/ufs/inode.c
@@ -588,7 +588,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations ufs_aops = {
 	.readpage = ufs_readpage,
 	.writepage = ufs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ufs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = ufs_bmap
fs/ufs/truncate.c
@@ -481,7 +481,7 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
 			break;
 		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
 			ufs_sync_inode (inode);
-		blk_run_address_space(inode->i_mapping);
+		blk_flush_plug(current);
 		yield();
 	}
 
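Note: blk_flush_plug(current) is the idiomatic replacement when a task
genuinely needs its queued I/O on the wire before backing off: it
submits the caller's own plugged requests rather than kicking a shared
queue. The retry idiom, as a sketch (the predicate is hypothetical):

while (still_busy(inode)) {		/* hypothetical predicate */
	blk_flush_plug(current);	/* submit anything we plugged */
	yield();			/* let that I/O make progress */
}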
fs/xfs/linux-2.6/xfs_aops.c
@@ -1495,7 +1495,6 @@ const struct address_space_operations xfs_address_space_operations = {
 	.readpages		= xfs_vm_readpages,
 	.writepage		= xfs_vm_writepage,
 	.writepages		= xfs_vm_writepages,
-	.sync_page		= block_sync_page,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
 	.write_begin		= xfs_vm_write_begin,