bcache: Make gc wakeup sane, remove set_task_state()

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 59331c215d
commit be628be095
5 changed files with 27 additions and 27 deletions
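The heart of the change is in the drivers/md/bcache/btree.c hunks further down: instead of parking itself with set_current_state()/schedule() and being kicked with wake_up_process(), the gc thread now sleeps on a dedicated wait queue (cache_set->gc_wait) and is woken with a plain wake_up(). A minimal sketch of that kthread wait/wake pattern, with hypothetical demo_* names and a work_pending flag standing in for bcache's gc_should_run() condition:

#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);	/* plays the role of c->gc_wait */
static bool work_pending;			/* stand-in for gc_should_run(c) */

static int demo_thread(void *arg)
{
	while (1) {
		/* Sleep until there is work or we are asked to stop; the
		 * condition is re-evaluated after every wakeup, so racing
		 * or spurious wakeups are harmless. */
		wait_event_interruptible(demo_wait,
					 kthread_should_stop() || work_pending);

		if (kthread_should_stop())
			break;

		work_pending = false;
		/* ... do the actual work ... */
	}

	return 0;
}

/* Producer side: publish the condition, then wake the sleeper. */
static void demo_kick(void)
{
	work_pending = true;
	wake_up(&demo_wait);
}

The waker never touches the sleeper's task state; wait_event_interruptible() re-checks the condition after every wakeup, so a wakeup that races with the thread finishing its previous pass is simply absorbed. That is what lets the patch drop the task-state manipulation (set_task_state()/set_current_state()) around the gc thread.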
drivers/md/bcache/bcache.h

@@ -425,7 +425,7 @@ struct cache {
 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 	 * cpu
 	 */
-	unsigned		invalidate_needs_gc:1;
+	unsigned		invalidate_needs_gc;
 
 	bool			discard; /* Get rid of? */
 
@@ -593,8 +593,8 @@ struct cache_set {
 
 	/* Counts how many sectors bio_insert has added to the cache */
 	atomic_t		sectors_to_gc;
+	wait_queue_head_t	gc_wait;
 
-	wait_queue_head_t	moving_gc_wait;
 	struct keybuf		moving_gc_keys;
 	/* Number of moving GC bios in flight */
 	struct semaphore	moving_in_flight;
drivers/md/bcache/btree.c

@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
 	bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-	struct cache_set *c = arg;
 	struct cache *ca;
 	unsigned i;
 
-	while (1) {
-again:
-		bch_btree_gc(c);
+	for_each_cache(ca, c, i)
+		if (ca->invalidate_needs_gc)
+			return true;
 
-		set_current_state(TASK_INTERRUPTIBLE);
+	if (atomic_read(&c->sectors_to_gc) < 0)
+		return true;
+
+	return false;
+}
+
+static int bch_gc_thread(void *arg)
+{
+	struct cache_set *c = arg;
+
+	while (1) {
+		wait_event_interruptible(c->gc_wait,
+			   kthread_should_stop() || gc_should_run(c));
+
 		if (kthread_should_stop())
 			break;
 
-		mutex_lock(&c->bucket_lock);
-
-		for_each_cache(ca, c, i)
-			if (ca->invalidate_needs_gc) {
-				mutex_unlock(&c->bucket_lock);
-				set_current_state(TASK_RUNNING);
-				goto again;
-			}
-
-		mutex_unlock(&c->bucket_lock);
-
-		schedule();
+		set_gc_sectors(c);
+		bch_btree_gc(c);
 	}
 
 	return 0;
@@ -1790,11 +1792,10 @@ static int bch_gc_thread(void *arg)
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
 	if (IS_ERR(c->gc_thread))
 		return PTR_ERR(c->gc_thread);
 
-	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
 	return 0;
 }
 
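For reference, the thread lifecycle that bch_gc_thread_start() relies on after this patch: kthread_run() creates the thread and wakes it immediately, so the caller no longer touches the thread's task state, and kthread_stop() sets the stop flag and wakes the thread, which is why the wait condition above also tests kthread_should_stop(). A minimal sketch under those assumptions, again with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_gc_task;

static int demo_gc_fn(void *arg)
{
	/* Stand-in for the wait-and-work loop sketched earlier. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	return 0;
}

static int demo_gc_start(void)
{
	/* kthread_run() = kthread_create() + wake_up_process(). */
	demo_gc_task = kthread_run(demo_gc_fn, NULL, "demo_gc");
	if (IS_ERR(demo_gc_task))
		return PTR_ERR(demo_gc_task);

	return 0;
}

static void demo_gc_stop(void)
{
	/* Sets the should-stop flag, wakes the thread, and waits for
	 * demo_gc_fn() to return. */
	kthread_stop(demo_gc_task);
}

bch_gc_thread_start() now has exactly this shape: kthread_run() starts the thread, and the thread immediately puts itself to sleep on gc_wait unless gc_should_run() already returns true.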
drivers/md/bcache/btree.h

@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-	if (c->gc_thread)
-		wake_up_process(c->gc_thread);
+	wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE	0
drivers/md/bcache/request.c

@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 		wake_up_gc(op->c);
-	}
 
 	if (op->bypass)
 		return bch_data_invalidate(cl);
drivers/md/bcache/super.c

@@ -1489,6 +1489,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	mutex_init(&c->bucket_lock);
 	init_waitqueue_head(&c->btree_cache_wait);
 	init_waitqueue_head(&c->bucket_wait);
+	init_waitqueue_head(&c->gc_wait);
 	sema_init(&c->uuid_write_mutex, 1);
 
 	spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1549,7 @@ static void run_cache_set(struct cache_set *c)
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
+	set_gc_sectors(c);
 
 	if (CACHE_SYNC(&c->sb)) {
 		LIST_HEAD(journal);