mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)
writeback: move nr_pages == 0 logic to one location

Now that we have no external callers of wb_start_writeback(), we can
shuffle the passing in of 'nr_pages'. Everybody passes in 0 at this
point, so just kill the argument and move the dirty count retrieval
to that function.

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9dfb176fae
commit e8e8a0c6c9

1 changed file with 17 additions and 24 deletions
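In brief, the interface change, distilled from the diff below (prototypes
abbreviated here as a summary, not copied from a header file):

/* Before: every caller supplied a page count. */
static void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason);

/* After: the argument is gone; the function sizes its own work item via
 * wb_split_bdi_pages(wb, get_nr_dirty_pages()). */
static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason);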
				
			
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -933,8 +933,18 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-static void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				enum wb_reason reason)
+/*
+ * Add in the number of potentially dirty inodes, because each inode
+ * write can dirty pagecache in the underlying blockdev.
+ */
+static unsigned long get_nr_dirty_pages(void)
+{
+	return global_node_page_state(NR_FILE_DIRTY) +
+		global_node_page_state(NR_UNSTABLE_NFS) +
+		get_nr_dirty_inodes();
+}
+
+static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -954,7 +964,7 @@ static void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 	}
 
 	work->sync_mode	= WB_SYNC_NONE;
-	work->nr_pages	= nr_pages;
+	work->nr_pages	= wb_split_bdi_pages(wb, get_nr_dirty_pages());
 	work->range_cyclic = 1;
 	work->reason	= reason;
 	work->auto_free	= 1;
@@ -1814,17 +1824,6 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 	return work;
 }
 
-/*
- * Add in the number of potentially dirty inodes, because each inode
- * write can dirty pagecache in the underlying blockdev.
- */
-static unsigned long get_nr_dirty_pages(void)
-{
-	return global_node_page_state(NR_FILE_DIRTY) +
-		global_node_page_state(NR_UNSTABLE_NFS) +
-		get_nr_dirty_inodes();
-}
-
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
 	if (wb_over_bg_thresh(wb)) {
@@ -1951,7 +1950,7 @@ void wb_workfn(struct work_struct *work)
  * write back the whole world.
  */
 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
-					 long nr_pages, enum wb_reason reason)
+					 enum wb_reason reason)
 {
 	struct bdi_writeback *wb;
 
@@ -1959,17 +1958,14 @@ static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 		return;
 
 	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
-		wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
-					reason);
+		wb_start_writeback(wb, reason);
 }
 
 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 				enum wb_reason reason)
 {
-	long nr_pages = get_nr_dirty_pages();
-
 	rcu_read_lock();
-	__wakeup_flusher_threads_bdi(bdi, nr_pages, reason);
+	__wakeup_flusher_threads_bdi(bdi, reason);
 	rcu_read_unlock();
 }
 
@@ -1979,7 +1975,6 @@ void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 void wakeup_flusher_threads(enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
-	long nr_pages;
 
 	/*
 	 * If we are expecting writeback progress we must submit plugged IO.
@@ -1987,11 +1982,9 @@ void wakeup_flusher_threads(enum wb_reason reason)
 	if (blk_needs_flush_plug(current))
 		blk_schedule_flush_plug(current);
 
-	nr_pages = get_nr_dirty_pages();
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wakeup_flusher_threads_bdi(bdi, nr_pages, reason);
+		__wakeup_flusher_threads_bdi(bdi, reason);
 	rcu_read_unlock();
 }
 
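The design point worth noting: get_nr_dirty_pages() reads global counters, so
the proportional per-wb split via wb_split_bdi_pages() now happens in exactly
one place, inside wb_start_writeback(), rather than in each wakeup path. A
condensed view of the resulting flow, assembled from the hunks above (a
sketch, not the verbatim file):

/* Both wakeup entry points now pass only the reason down. */
void wakeup_flusher_threads(enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		__wakeup_flusher_threads_bdi(bdi, reason);
	rcu_read_unlock();
}

/* ...which ultimately reaches wb_start_writeback(), where each wb sizes
 * its own work item:
 *
 *	work->nr_pages = wb_split_bdi_pages(wb, get_nr_dirty_pages());
 */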