	ext4: remove metadata reservation checks
Commit 27dd438542 ("ext4: introduce reserved space") reserves 2% of
the file system space to make sure metadata allocations will always
succeed.  Given that, tracking the reservation of metadata blocks is
no longer necessary.
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
			
			
parent d5e03cbb0c
commit 71d4f7d032

5 changed files with 7 additions and 141 deletions
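
For background on the commit message above: since commit 27dd438542 ("ext4: introduce reserved space"), the allocator keeps back a pool of clusters (roughly 2% of the file system) that only metadata allocations may dip into, so a metadata allocation cannot fail for lack of space even after ordinary data allocations start being refused. The sketch below only illustrates that idea; it is not the ext4 implementation, and the names (fs_counters, can_claim) are invented for illustration.

#include <stdbool.h>

/* Illustrative sketch only -- not the ext4 code. */
struct fs_counters {
	long long free_clusters;	/* clusters still unallocated */
	long long dirty_clusters;	/* delalloc clusters already promised to data */
	long long resv_clusters;	/* pool (~2% of the fs) held back for metadata */
};

/*
 * May we promise @nclusters more clusters?  Data allocations must leave
 * the reserved pool untouched; metadata allocations may eat into it.
 * Because the pool is sized so that metadata allocations always succeed,
 * tracking reserved metadata blocks per inode becomes unnecessary, which
 * is what this commit removes.
 */
bool can_claim(const struct fs_counters *c, long long nclusters,
	       bool for_metadata)
{
	long long keep_back = for_metadata ? 0 : c->resv_clusters;

	return c->free_clusters - c->dirty_clusters - nclusters >= keep_back;
}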
				
			
fs/ext4/balloc.c
@@ -639,7 +639,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	if (!(*errp) &&
 	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
 				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));

fs/ext4/ext4.h
@@ -591,7 +591,6 @@ enum {
 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE	0x0008
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER	0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER	0x0020
-#define EXT4_FREE_BLOCKS_RESERVE		0x0040
 
 /*
  * ioctl commands

fs/ext4/extents.c
@@ -1808,8 +1808,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 
 	brelse(path[1].p_bh);
 	ext4_free_blocks(handle, inode, NULL, blk, 1,
-			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
-			 EXT4_FREE_BLOCKS_RESERVE);
+			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 }
 
 /*

fs/ext4/inode.c (128 changed lines)
@@ -324,18 +324,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 }
 #endif
 
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.

@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			"with only %d reserved metadata blocks "
-			"(releasing %d blocks with reserved %d data blocks)",
-			inode->i_ino, ei->i_allocated_meta_blocks,
-			     ei->i_reserved_meta_blocks, used,
-			     ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */

@@ -1221,49 +1184,6 @@ static int ext4_journalled_write_end(struct file *file,
 	return ret ? ret : copied;
 }
 
-/*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;       /* success */
-}
-
 /*
  * Reserve a single cluster located at lblock
  */

@@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves

@@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;       /* success */

@@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
 

@@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-	       ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-	       ei->i_allocated_meta_blocks);
 	return;
 }
 

@@ -1620,13 +1510,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 				retval = ret;
 				goto out_unlock;
 			}
-		} else {
-			ret = ext4_da_reserve_metadata(inode, iblock);
-			if (ret) {
-				/* not enough space to reserve */
-				retval = ret;
-				goto out_unlock;
-			}
 		}
 
 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,

@@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
 	trace_ext4_alloc_da_blocks(inode);
 
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*

fs/ext4/mballoc.c
@@ -4627,7 +4627,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	struct buffer_head *gd_bh;
 	ext4_group_t block_group;
 	struct ext4_sb_info *sbi;
-	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_buddy e4b;
 	unsigned int count_clusters;
 	int err = 0;

@@ -4838,19 +4837,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 			     &sbi->s_flex_groups[flex_group].free_clusters);
 	}
 
-	if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
-		percpu_counter_add(&sbi->s_dirtyclusters_counter,
-				   count_clusters);
-		spin_lock(&ei->i_block_reservation_lock);
-		if (flags & EXT4_FREE_BLOCKS_METADATA)
-			ei->i_reserved_meta_blocks += count_clusters;
-		else
-			ei->i_reserved_data_blocks += count_clusters;
-		spin_unlock(&ei->i_block_reservation_lock);
-		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
-			dquot_reclaim_block(inode,
-					EXT4_C2B(sbi, count_clusters));
-	} else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 