	bcache: Kill btree_io_wq
With the locking rework in the last patch, this shouldn't be needed anymore - btree_node_write_work() only takes b->write_lock, which is never held for very long.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 2a285686c1
commit 56b30770b2

3 changed files with 2 additions and 24 deletions
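The mechanical change is a swap from a dedicated single-threaded workqueue, which has to be created at init time and destroyed at exit time, to delayed work scheduled on the shared system workqueue. Below is a minimal sketch of the two patterns, using hypothetical my_* names rather than the bcache identifiers, and assuming a short-running work handler as the commit message describes:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct delayed_work my_work;

static void my_work_fn(struct work_struct *w)
{
	/* short-running work; nothing here holds a lock for long */
}

/* Old pattern: a private workqueue that needs explicit setup and teardown. */
static struct workqueue_struct *my_io_wq;

static int my_old_init(void)
{
	my_io_wq = create_singlethread_workqueue("my_io");
	if (!my_io_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&my_work, my_work_fn);
	return 0;
}

static void my_old_kick(void)
{
	queue_delayed_work(my_io_wq, &my_work, msecs_to_jiffies(30000));
}

static void my_old_exit(void)
{
	if (my_io_wq)
		destroy_workqueue(my_io_wq);
}

/* New pattern: reuse the system workqueue; no init/exit bookkeeping at all. */
static void my_new_kick(void)
{
	schedule_delayed_work(&my_work, 30 * HZ);
}

schedule_delayed_work() queues onto the kernel's shared system workqueue, which is why the patch can also drop bch_btree_init()/bch_btree_exit() and their callers in bcache_init()/bcache_exit().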
drivers/md/bcache/bcache.h
@@ -962,7 +962,5 @@ void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
 void bch_request_exit(void);
 int bch_request_init(void);
-void bch_btree_exit(void);
-int bch_btree_init(void);
 
 #endif /* _BCACHE_H */
drivers/md/bcache/btree.c
@@ -93,8 +93,6 @@
 #define PTR_HASH(c, k)							\
 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
 
-static struct workqueue_struct *btree_io_wq;
-
 #define insert_lock(s, b)	((b)->level <= (s)->lock)
 
 /*
@@ -362,8 +360,7 @@ static void __btree_node_write_done(struct closure *cl)
 	btree_complete_write(b, w);
 
 	if (btree_node_dirty(b))
-		queue_delayed_work(btree_io_wq, &b->work,
-				   msecs_to_jiffies(30000));
+		schedule_delayed_work(&b->work, 30 * HZ);
 
 	closure_return_with_destructor(cl, btree_node_write_unlock);
 }
@@ -535,7 +532,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 	BUG_ON(!i->keys);
 
 	if (!btree_node_dirty(b))
-		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+		schedule_delayed_work(&b->work, 30 * HZ);
 
 	set_btree_node_dirty(b);
 
@@ -2446,18 +2443,3 @@ void bch_keybuf_init(struct keybuf *buf)
 	spin_lock_init(&buf->lock);
 	array_allocator_init(&buf->freelist);
 }
-
-void bch_btree_exit(void)
-{
-	if (btree_io_wq)
-		destroy_workqueue(btree_io_wq);
-}
-
-int __init bch_btree_init(void)
-{
-	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
-	if (!btree_io_wq)
-		return -ENOMEM;
-
-	return 0;
-}
drivers/md/bcache/super.c
@@ -2072,7 +2072,6 @@ static void bcache_exit(void)
 {
 	bch_debug_exit();
 	bch_request_exit();
-	bch_btree_exit();
 	if (bcache_kobj)
 		kobject_put(bcache_kobj);
 	if (bcache_wq)
@@ -2102,7 +2101,6 @@ static int __init bcache_init(void)
 	if (!(bcache_wq = create_workqueue("bcache")) ||
 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
 	    sysfs_create_files(bcache_kobj, files) ||
-	    bch_btree_init() ||
 	    bch_request_init() ||
 	    bch_debug_init(bcache_kobj))
 		goto err;