forked from mirrors/linux
		
	writeback: don't drain bdi_writeback_congested on bdi destruction
52ebea749a ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks") made bdi (backing_dev_info) host per-cgroup wb's (bdi_writeback's). As the congested state needs to be per-wb and referenced from blkcg side and multiple wbs, the patch made all non-root cong's (bdi_writeback_congested's) reference counted and indexed on bdi. When a bdi is destroyed, cgwb_bdi_destroy() tries to drain all non-root cong's; however, this can hang indefinitely because wb's can also be referenced from blkcg_gq's which are destroyed after bdi destruction is complete. This patch fixes the bug by updating bdi destruction to not wait for cong's to drain. A cong is unlinked from bdi->cgwb_congested_tree on bdi destruction regardless of its reference count, as the bdi may go away at any point after destruction. wb_congested_put() checks whether the cong is already unlinked on release. Signed-off-by: Tejun Heo <tj@kernel.org> Reported-by: Jon Christopherson <jon@jons.org> Link: https://bugzilla.kernel.org/show_bug.cgi?id=100681 Fixes: 52ebea749a ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks") Tested-by: Jon Christopherson <jon@jons.org> Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
		
							parent
							
								
									a13f35e871
								
							
						
					
					
						commit
						a20135ffbc
					
				
					 1 changed files with 16 additions and 6 deletions
				
			
		| 
						 | 
					@ -425,7 +425,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 | 
				
			||||||
		new_congested = NULL;
 | 
							new_congested = NULL;
 | 
				
			||||||
		rb_link_node(&congested->rb_node, parent, node);
 | 
							rb_link_node(&congested->rb_node, parent, node);
 | 
				
			||||||
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
 | 
							rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
 | 
				
			||||||
		atomic_inc(&bdi->usage_cnt);
 | 
					 | 
				
			||||||
		goto found;
 | 
							goto found;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -456,7 +455,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
void wb_congested_put(struct bdi_writeback_congested *congested)
 | 
					void wb_congested_put(struct bdi_writeback_congested *congested)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct backing_dev_info *bdi = congested->bdi;
 | 
					 | 
				
			||||||
	unsigned long flags;
 | 
						unsigned long flags;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	local_irq_save(flags);
 | 
						local_irq_save(flags);
 | 
				
			||||||
| 
						 | 
					@ -465,12 +463,15 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
 | 
				
			||||||
		return;
 | 
							return;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	rb_erase(&congested->rb_node, &congested->bdi->cgwb_congested_tree);
 | 
						/* bdi might already have been destroyed leaving @congested unlinked */
 | 
				
			||||||
 | 
						if (congested->bdi) {
 | 
				
			||||||
 | 
							rb_erase(&congested->rb_node,
 | 
				
			||||||
 | 
								 &congested->bdi->cgwb_congested_tree);
 | 
				
			||||||
 | 
							congested->bdi = NULL;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	spin_unlock_irqrestore(&cgwb_lock, flags);
 | 
						spin_unlock_irqrestore(&cgwb_lock, flags);
 | 
				
			||||||
	kfree(congested);
 | 
						kfree(congested);
 | 
				
			||||||
 | 
					 | 
				
			||||||
	if (atomic_dec_and_test(&bdi->usage_cnt))
 | 
					 | 
				
			||||||
		wake_up_all(&cgwb_release_wait);
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void cgwb_release_workfn(struct work_struct *work)
 | 
					static void cgwb_release_workfn(struct work_struct *work)
 | 
				
			||||||
| 
						 | 
					@ -675,13 +676,22 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 | 
				
			||||||
static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 | 
					static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct radix_tree_iter iter;
 | 
						struct radix_tree_iter iter;
 | 
				
			||||||
 | 
						struct bdi_writeback_congested *congested, *congested_n;
 | 
				
			||||||
	void **slot;
 | 
						void **slot;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 | 
						WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	spin_lock_irq(&cgwb_lock);
 | 
						spin_lock_irq(&cgwb_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 | 
						radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 | 
				
			||||||
		cgwb_kill(*slot);
 | 
							cgwb_kill(*slot);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						rbtree_postorder_for_each_entry_safe(congested, congested_n,
 | 
				
			||||||
 | 
										&bdi->cgwb_congested_tree, rb_node) {
 | 
				
			||||||
 | 
							rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
 | 
				
			||||||
 | 
							congested->bdi = NULL;	/* mark @congested unlinked */
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	spin_unlock_irq(&cgwb_lock);
 | 
						spin_unlock_irq(&cgwb_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue