	block: document blk-plug
Thus spake Andrew Morton:

"And I have the usual maintainability whine. If someone comes up to vmscan.c
and sees it calling blk_start_plug(), how are they supposed to work out why
that call is there? They go look at the blk_start_plug() definition and it is
undocumented. I think we can do better than this?"

Adapted from the LWN article - http://lwn.net/Articles/438256/ by Jens Axboe
and from an earlier attempt by Shaohua Li to document blk-plug.

[akpm@linux-foundation.org: grammatical and spelling tweaks]
Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@google.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 27a84d54c0
commit 75df713627

2 changed files with 29 additions and 9 deletions
block/blk-core.c

@@ -2595,6 +2595,20 @@ EXPORT_SYMBOL(kblockd_schedule_delayed_work);
 
 #define PLUG_MAGIC	0x91827364
 
+/**
+ * blk_start_plug - initialize blk_plug and track it inside the task_struct
+ * @plug:	The &struct blk_plug that needs to be initialized
+ *
+ * Description:
+ *   Tracking blk_plug inside the task_struct will help with auto-flushing the
+ *   pending I/O should the task end up blocking between blk_start_plug() and
+ *   blk_finish_plug(). This is important from a performance perspective, but
+ *   also ensures that we don't deadlock. For instance, if the task is blocking
+ *   for a memory allocation, memory reclaim could end up wanting to free a
+ *   page belonging to that request that is currently residing in our private
+ *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
+ *   this kind of deadlock.
+ */
 void blk_start_plug(struct blk_plug *plug)
 {
 	struct task_struct *tsk = current;
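For context (not part of this commit), a minimal caller sketch of the API the
new kernel-doc describes - the pattern the vmscan.c remark in the commit
message refers to: the plug lives on the caller's stack, blk_start_plug()
attaches it to the current task, and blk_finish_plug() flushes whatever was
queued in between. The function name and the elided submission code are
illustrative placeholders, not taken from the kernel tree.

	#include <linux/blkdev.h>

	/* Illustrative sketch only (not from this commit). */
	static void writeback_batch(void)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);	/* requests now queue up on current->plug */

		/* ... submit a series of bios/requests here ... */

		blk_finish_plug(&plug);	/* flush the batched requests to the queue */
	}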

include/linux/blkdev.h

@@ -860,17 +860,23 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
 /*
- * Note: Code in between changing the blk_plug list/cb_list or element of such
- * lists is preemptable, but such code can't do sleep (or be very careful),
- * otherwise data is corrupted. For details, please check schedule() where
- * blk_schedule_flush_plug() is called.
+ * blk_plug permits building a queue of related requests by holding the I/O
+ * fragments for a short period. This allows merging of sequential requests
+ * into single larger request. As the requests are moved from a per-task list to
+ * the device's request_queue in a batch, this results in improved scalability
+ * as the lock contention for request_queue lock is reduced.
+ *
+ * It is ok not to disable preemption when adding the request to the plug list
+ * or when attempting a merge, because blk_schedule_flush_list() will only flush
+ * the plug list when the task sleeps by itself. For details, please see
+ * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic;
-	struct list_head list;
-	struct list_head cb_list;
-	unsigned int should_sort;
-	unsigned int count;
+	unsigned long magic; /* detect uninitialized use-cases */
+	struct list_head list; /* requests */
+	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned int should_sort; /* list to be sorted before flushing? */
+	unsigned int count; /* number of queued requests */
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
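For reference (again, not part of this commit), the flush-on-sleep behaviour
the new comment relies on is a small helper that schedule() invokes for a task
about to block. A paraphrased sketch, assuming the
blk_flush_plug_list(plug, from_schedule) interface of this kernel era, looks
roughly like:

	/* Paraphrased sketch of the hook referenced above, not verbatim code. */
	static inline void blk_schedule_flush_plug(struct task_struct *tsk)
	{
		struct blk_plug *plug = tsk->plug;

		if (plug)
			blk_flush_plug_list(plug, true);	/* true: flushing from schedule() */
	}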