mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 00:28:52 +02:00

	writeback: Protect inode->i_io_list with inode->i_lock

Currently, operations on inode->i_io_list are protected by wb->list_lock. In the following patches we'll need to maintain consistency between inode->i_state and inode->i_io_list, so change the code so that inode->i_lock also protects all of the inode's i_io_list handling.

Reviewed-by: Martijn Coenen <maco@android.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
CC: stable@vger.kernel.org # Prerequisite for "writeback: Avoid skipping inode writeback"
Signed-off-by: Jan Kara <jack@suse.cz>
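As a rough standalone illustration of the rule this patch introduces (an analogy with invented names, using pthread mutexes in place of wb->list_lock and inode->i_lock; this is not kernel code): taking an item off the writeback list now requires both the list lock and the item's own lock, so code holding only the item's lock can rely on the item's state flags and its list membership staying consistent with each other.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct bdi_writeback: a list of items queued for writeback. */
struct wb_like {
	pthread_mutex_t list_lock;	/* plays the role of wb->list_lock */
	struct item_like *head;
};

/* Stand-in for an inode: its own lock, a state flag, and list membership. */
struct item_like {
	pthread_mutex_t lock;		/* plays the role of inode->i_lock */
	bool dirty;			/* stands in for inode->i_state bits */
	bool on_list;			/* stands in for being on inode->i_io_list */
	struct item_like *next;
};

/* Remove an item from the writeback list.  With the new rule, both locks are
 * held, so on_list only changes while the item's own lock is held and stays
 * consistent with the item's state flags. */
static void io_list_del(struct wb_like *wb, struct item_like *item)
{
	pthread_mutex_lock(&wb->list_lock);	/* outer lock, taken first */
	pthread_mutex_lock(&item->lock);	/* inner lock, nested inside */

	for (struct item_like **p = &wb->head; *p; p = &(*p)->next) {
		if (*p == item) {
			*p = item->next;
			break;
		}
	}
	item->on_list = false;

	pthread_mutex_unlock(&item->lock);
	pthread_mutex_unlock(&wb->list_lock);
}

/* A reader holding only the item's lock now sees a stable view of both the
 * state flag and the list membership. */
static void inspect(struct item_like *item)
{
	pthread_mutex_lock(&item->lock);
	printf("dirty=%d on_list=%d\n", item->dirty, item->on_list);
	pthread_mutex_unlock(&item->lock);
}

int main(void)
{
	struct item_like it = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .dirty = true, .on_list = true,
	};
	struct wb_like wb = { .list_lock = PTHREAD_MUTEX_INITIALIZER, .head = &it };

	inspect(&it);
	io_list_del(&wb, &it);
	inspect(&it);
	return 0;
}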
parent 5b14671be5
commit b35250c081

1 changed file with 17 additions and 5 deletions
fs/fs-writeback.c

@@ -144,6 +144,7 @@ static void inode_io_list_del_locked(struct inode *inode,
 				     struct bdi_writeback *wb)
 {
 	assert_spin_locked(&wb->list_lock);
+	assert_spin_locked(&inode->i_lock);
 
 	list_del_init(&inode->i_io_list);
 	wb_io_lists_depopulated(wb);
@@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode)
 	struct bdi_writeback *wb;
 
 	wb = inode_to_wb_and_lock_list(inode);
+	spin_lock(&inode->i_lock);
 	inode_io_list_del_locked(inode, wb);
+	spin_unlock(&inode->i_lock);
 	spin_unlock(&wb->list_lock);
 }
 EXPORT_SYMBOL(inode_io_list_del);
@@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode)
  * the case then the inode must have been redirtied while it was being written
  * out and we don't reset its dirtied_when.
  */
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
 {
+	assert_spin_locked(&inode->i_lock);
+
 	if (!list_empty(&wb->b_dirty)) {
 		struct inode *tail;
 
@@ -1184,6 +1189,13 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
 	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
 }
 
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+{
+	spin_lock(&inode->i_lock);
+	redirty_tail_locked(inode, wb);
+	spin_unlock(&inode->i_lock);
+}
+
 /*
  * requeue inode for re-scanning after bdi->b_io list is exhausted.
  */
@@ -1394,7 +1406,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 		 * writeback is not making progress due to locked
 		 * buffers. Skip this inode for now.
 		 */
-		redirty_tail(inode, wb);
+		redirty_tail_locked(inode, wb);
 		return;
 	}
 
@@ -1414,7 +1426,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 			 * retrying writeback of the dirty page/inode
 			 * that cannot be performed immediately.
 			 */
-			redirty_tail(inode, wb);
+			redirty_tail_locked(inode, wb);
 		}
 	} else if (inode->i_state & I_DIRTY) {
 		/*
@@ -1422,7 +1434,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 		 * such as delayed allocation during submission or metadata
 		 * updates after data IO completion.
 		 */
-		redirty_tail(inode, wb);
+		redirty_tail_locked(inode, wb);
 	} else if (inode->i_state & I_DIRTY_TIME) {
 		inode->dirtied_when = jiffies;
 		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
@@ -1669,8 +1681,8 @@ static long writeback_sb_inodes(struct super_block *sb,
 		 */
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			redirty_tail_locked(inode, wb);
 			spin_unlock(&inode->i_lock);
-			redirty_tail(inode, wb);
 			continue;
 		}
 		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
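The redirty_tail_locked()/redirty_tail() split in the diff follows the common kernel pattern of a *_locked helper that documents and asserts its caller's locking, plus a thin wrapper that takes the lock itself. Below is a self-contained sketch of the same idiom in plain C with pthreads; every name here is invented for the example, and the held-flag check merely approximates what assert_spin_locked() does in the kernel.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy lock that remembers whether it is held, so the _locked helper can
 * assert its calling convention roughly like assert_spin_locked(). */
struct checked_lock {
	pthread_mutex_t mutex;
	bool held;
};

static void cl_lock(struct checked_lock *l)
{
	pthread_mutex_lock(&l->mutex);
	l->held = true;
}

static void cl_unlock(struct checked_lock *l)
{
	l->held = false;
	pthread_mutex_unlock(&l->mutex);
}

struct counter {
	struct checked_lock lock;
	long value;
};

/* _locked variant: the caller must already hold counter->lock. */
static void counter_bump_locked(struct counter *c)
{
	assert(c->lock.held);		/* rough analogue of assert_spin_locked() */
	c->value++;
}

/* Wrapper for callers that are not yet inside a locked section. */
static void counter_bump(struct counter *c)
{
	cl_lock(&c->lock);
	counter_bump_locked(c);
	cl_unlock(&c->lock);
}

int main(void)
{
	struct counter c = {
		.lock = { .mutex = PTHREAD_MUTEX_INITIALIZER, .held = false },
	};

	counter_bump(&c);		/* plain caller goes through the wrapper */

	cl_lock(&c.lock);		/* caller already holding the lock */
	counter_bump_locked(&c);
	cl_unlock(&c.lock);

	printf("value = %ld\n", c.value);
	return 0;
}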