remove bdi_congested() and wb_congested() and related functions

These functions are no longer useful as no BDIs report congestion any more.

Removing the test on bdi_write_congested() in current_may_throttle() could
cause a small change in behaviour, but only when PF_LOCAL_THROTTLE is set.

So replace the calls by 'false' and simplify the code - and remove the
functions.

[akpm@linux-foundation.org: fix build]
Link: https://lkml.kernel.org/r/164549983742.9187.2570198746005819592.stgit@noble.brown
Signed-off-by: NeilBrown <neilb@suse.de>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> [nilfs]
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Lars Ellenberg <lars.ellenberg@linbit.com>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Paolo Valente <paolo.valente@linaro.org>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b9b1335e64
parent fe55d563d4

7 changed files with 2 additions and 58 deletions
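Every hunk below applies the same mechanical change: a congestion test that can no longer return true is dropped, which is what the commit message means by replacing the calls with 'false'. As a minimal illustration, here is a hypothetical caller (not one of the files touched by this commit; the function name is made up for the example):

	/* Hypothetical example: optional readahead that used to be skipped
	 * while the backing device was congested. */
	static void example_readahead(struct backing_dev_info *bdi)
	{
		/* Removed by this commit; bdi_read_congested() could only
		 * ever return false, because no BDI sets the
		 * WB_sync_congested/WB_async_congested bits any more:
		 *
		 *	if (bdi_read_congested(bdi))
		 *		return;
		 */

		/* ... submit the readahead I/O unconditionally ... */
	}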
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -638,9 +638,6 @@ enum {
 	STATE_SENT,		/* Do not change state/UUIDs while this is set */
 	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
 				 * pending, from drbd worker context.
-				 * If set, bdi_write_congested() returns true,
-				 * so shrink_page_list() would not recurse into,
-				 * and potentially deadlock on, this drbd worker.
 				 */
 	DISCONNECT_SENT,
 
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -909,8 +909,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
 
 	switch (rbm) {
 	case RB_CONGESTED_REMOTE:
-		return bdi_read_congested(
-			device->ldev->backing_bdev->bd_disk->bdi);
+		return 0;
 	case RB_LEAST_PENDING:
 		return atomic_read(&device->local_cnt) >
 			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -170,11 +170,6 @@ static void ext2_preread_inode(struct inode *inode)
 	unsigned long offset;
 	unsigned long block;
 	struct ext2_group_desc * gdp;
-	struct backing_dev_info *bdi;
-
-	bdi = inode_to_bdi(inode);
-	if (bdi_rw_congested(bdi))
-		return;
 
 	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
 	gdp = ext2_get_group_desc(inode->i_sb, block_group, NULL);
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -341,18 +341,6 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
 				   int mode_flags)
 {
 	struct bio *bio = wi->bio;
-	int err;
-
-	if (segbuf->sb_nbio > 0 &&
-	    bdi_write_congested(segbuf->sb_super->s_bdi)) {
-		wait_for_completion(&segbuf->sb_bio_event);
-		segbuf->sb_nbio--;
-		if (unlikely(atomic_read(&segbuf->sb_err))) {
-			bio_put(bio);
-			err = -EIO;
-			goto failed;
-		}
-	}
 
 	bio->bi_end_io = nilfs_end_bio_write;
 	bio->bi_private = segbuf;
@@ -365,10 +353,6 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
 	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
 	wi->start = wi->end;
 	return 0;
-
- failed:
-	wi->bio = NULL;
-	return err;
 }
 
 /**
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -843,9 +843,6 @@ xfs_buf_readahead_map(
 {
 	struct xfs_buf		*bp;
 
-	if (bdi_read_congested(target->bt_bdev->bd_disk->bdi))
-		return;
-
 	xfs_buf_read_map(target, map, nmaps,
 		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
 		     __this_address);
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -135,11 +135,6 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
 
 struct backing_dev_info *inode_to_bdi(struct inode *inode);
 
-static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
-{
-	return wb->congested & cong_bits;
-}
-
 long congestion_wait(int sync, long timeout);
 
 static inline bool mapping_can_writeback(struct address_space *mapping)
@@ -391,27 +386,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
-{
-	return wb_congested(&bdi->wb, cong_bits);
-}
-
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
-{
-	return bdi_congested(bdi, 1 << WB_sync_congested);
-}
-
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
-	return bdi_congested(bdi, 1 << WB_async_congested);
-}
-
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
-{
-	return bdi_congested(bdi, (1 << WB_sync_congested) |
-				  (1 << WB_async_congested));
-}
-
 const char *bdi_dev_name(struct backing_dev_info *bdi);
 
 #endif	/* _LINUX_BACKING_DEV_H */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2364,9 +2364,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
  */
 static int current_may_throttle(void)
 {
-	return !(current->flags & PF_LOCAL_THROTTLE) ||
-		current->backing_dev_info == NULL ||
-		bdi_write_congested(current->backing_dev_info);
+	return !(current->flags & PF_LOCAL_THROTTLE);
 }
 
 /*
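For reference, a sketch of why this last hunk is the only place a behaviour change can appear; the reduced expressions below are a reading of the diff, not text from the commit:

	/*
	 * Old logic, with bdi_write_congested() treated as the constant
	 * false (no BDI reports congestion any more):
	 *
	 *	return !(current->flags & PF_LOCAL_THROTTLE) ||
	 *		current->backing_dev_info == NULL ||
	 *		false;
	 *
	 * which reduces to:
	 *
	 *	return !(current->flags & PF_LOCAL_THROTTLE) ||
	 *		current->backing_dev_info == NULL;
	 *
	 * The new code also drops the backing_dev_info == NULL clause, so a
	 * PF_LOCAL_THROTTLE task without a backing_dev_info can no longer
	 * throttle here; that is the "small change in behaviour" the commit
	 * message mentions, and it only affects PF_LOCAL_THROTTLE tasks.
	 */
	static int current_may_throttle(void)
	{
		return !(current->flags & PF_LOCAL_THROTTLE);
	}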