forked from mirrors/linux
		
	mm/damon: simplify stop mechanism

A kernel thread can exit gracefully with kthread_stop(). So we don't need
a new flag 'kdamond_stop'. And to make sure the task struct is not freed
when accessing it, get reference to it before termination.

Link: https://lkml.kernel.org/r/20211027130517.4404-1-changbin.du@gmail.com
Signed-off-by: Changbin Du <changbin.du@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									0d16cfd46b
								
							
						
					
					
						commit
						0f91d13366
					
				
					 2 changed files with 15 additions and 37 deletions
				
			
		|  | @ -381,7 +381,6 @@ struct damon_ctx { | ||||||
| 
 | 
 | ||||||
| /* public: */ | /* public: */ | ||||||
| 	struct task_struct *kdamond; | 	struct task_struct *kdamond; | ||||||
| 	bool kdamond_stop; |  | ||||||
| 	struct mutex kdamond_lock; | 	struct mutex kdamond_lock; | ||||||
| 
 | 
 | ||||||
| 	struct damon_primitive primitive; | 	struct damon_primitive primitive; | ||||||
|  |  | ||||||
|  | @ -390,17 +390,6 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) | ||||||
| 	return sz; | 	return sz; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static bool damon_kdamond_running(struct damon_ctx *ctx) |  | ||||||
| { |  | ||||||
| 	bool running; |  | ||||||
| 
 |  | ||||||
| 	mutex_lock(&ctx->kdamond_lock); |  | ||||||
| 	running = ctx->kdamond != NULL; |  | ||||||
| 	mutex_unlock(&ctx->kdamond_lock); |  | ||||||
| 
 |  | ||||||
| 	return running; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static int kdamond_fn(void *data); | static int kdamond_fn(void *data); | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  | @ -418,7 +407,6 @@ static int __damon_start(struct damon_ctx *ctx) | ||||||
| 	mutex_lock(&ctx->kdamond_lock); | 	mutex_lock(&ctx->kdamond_lock); | ||||||
| 	if (!ctx->kdamond) { | 	if (!ctx->kdamond) { | ||||||
| 		err = 0; | 		err = 0; | ||||||
| 		ctx->kdamond_stop = false; |  | ||||||
| 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", | 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", | ||||||
| 				nr_running_ctxs); | 				nr_running_ctxs); | ||||||
| 		if (IS_ERR(ctx->kdamond)) { | 		if (IS_ERR(ctx->kdamond)) { | ||||||
|  | @ -474,13 +462,15 @@ int damon_start(struct damon_ctx **ctxs, int nr_ctxs) | ||||||
|  */ |  */ | ||||||
| static int __damon_stop(struct damon_ctx *ctx) | static int __damon_stop(struct damon_ctx *ctx) | ||||||
| { | { | ||||||
|  | 	struct task_struct *tsk; | ||||||
|  | 
 | ||||||
| 	mutex_lock(&ctx->kdamond_lock); | 	mutex_lock(&ctx->kdamond_lock); | ||||||
| 	if (ctx->kdamond) { | 	tsk = ctx->kdamond; | ||||||
| 		ctx->kdamond_stop = true; | 	if (tsk) { | ||||||
|  | 		get_task_struct(tsk); | ||||||
| 		mutex_unlock(&ctx->kdamond_lock); | 		mutex_unlock(&ctx->kdamond_lock); | ||||||
| 		while (damon_kdamond_running(ctx)) | 		kthread_stop(tsk); | ||||||
| 			usleep_range(ctx->sample_interval, | 		put_task_struct(tsk); | ||||||
| 					ctx->sample_interval * 2); |  | ||||||
| 		return 0; | 		return 0; | ||||||
| 	} | 	} | ||||||
| 	mutex_unlock(&ctx->kdamond_lock); | 	mutex_unlock(&ctx->kdamond_lock); | ||||||
|  | @ -925,12 +915,8 @@ static bool kdamond_need_update_primitive(struct damon_ctx *ctx) | ||||||
| static bool kdamond_need_stop(struct damon_ctx *ctx) | static bool kdamond_need_stop(struct damon_ctx *ctx) | ||||||
| { | { | ||||||
| 	struct damon_target *t; | 	struct damon_target *t; | ||||||
| 	bool stop; |  | ||||||
| 
 | 
 | ||||||
| 	mutex_lock(&ctx->kdamond_lock); | 	if (kthread_should_stop()) | ||||||
| 	stop = ctx->kdamond_stop; |  | ||||||
| 	mutex_unlock(&ctx->kdamond_lock); |  | ||||||
| 	if (stop) |  | ||||||
| 		return true; | 		return true; | ||||||
| 
 | 
 | ||||||
| 	if (!ctx->primitive.target_valid) | 	if (!ctx->primitive.target_valid) | ||||||
|  | @ -1021,13 +1007,6 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) | ||||||
| 	return -EBUSY; | 	return -EBUSY; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void set_kdamond_stop(struct damon_ctx *ctx) |  | ||||||
| { |  | ||||||
| 	mutex_lock(&ctx->kdamond_lock); |  | ||||||
| 	ctx->kdamond_stop = true; |  | ||||||
| 	mutex_unlock(&ctx->kdamond_lock); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /*
 | /*
 | ||||||
|  * The monitoring daemon that runs as a kernel thread |  * The monitoring daemon that runs as a kernel thread | ||||||
|  */ |  */ | ||||||
|  | @ -1038,17 +1017,18 @@ static int kdamond_fn(void *data) | ||||||
| 	struct damon_region *r, *next; | 	struct damon_region *r, *next; | ||||||
| 	unsigned int max_nr_accesses = 0; | 	unsigned int max_nr_accesses = 0; | ||||||
| 	unsigned long sz_limit = 0; | 	unsigned long sz_limit = 0; | ||||||
|  | 	bool done = false; | ||||||
| 
 | 
 | ||||||
| 	pr_debug("kdamond (%d) starts\n", current->pid); | 	pr_debug("kdamond (%d) starts\n", current->pid); | ||||||
| 
 | 
 | ||||||
| 	if (ctx->primitive.init) | 	if (ctx->primitive.init) | ||||||
| 		ctx->primitive.init(ctx); | 		ctx->primitive.init(ctx); | ||||||
| 	if (ctx->callback.before_start && ctx->callback.before_start(ctx)) | 	if (ctx->callback.before_start && ctx->callback.before_start(ctx)) | ||||||
| 		set_kdamond_stop(ctx); | 		done = true; | ||||||
| 
 | 
 | ||||||
| 	sz_limit = damon_region_sz_limit(ctx); | 	sz_limit = damon_region_sz_limit(ctx); | ||||||
| 
 | 
 | ||||||
| 	while (!kdamond_need_stop(ctx)) { | 	while (!kdamond_need_stop(ctx) && !done) { | ||||||
| 		if (kdamond_wait_activation(ctx)) | 		if (kdamond_wait_activation(ctx)) | ||||||
| 			continue; | 			continue; | ||||||
| 
 | 
 | ||||||
|  | @ -1056,7 +1036,7 @@ static int kdamond_fn(void *data) | ||||||
| 			ctx->primitive.prepare_access_checks(ctx); | 			ctx->primitive.prepare_access_checks(ctx); | ||||||
| 		if (ctx->callback.after_sampling && | 		if (ctx->callback.after_sampling && | ||||||
| 				ctx->callback.after_sampling(ctx)) | 				ctx->callback.after_sampling(ctx)) | ||||||
| 			set_kdamond_stop(ctx); | 			done = true; | ||||||
| 
 | 
 | ||||||
| 		usleep_range(ctx->sample_interval, ctx->sample_interval + 1); | 		usleep_range(ctx->sample_interval, ctx->sample_interval + 1); | ||||||
| 
 | 
 | ||||||
|  | @ -1069,7 +1049,7 @@ static int kdamond_fn(void *data) | ||||||
| 					sz_limit); | 					sz_limit); | ||||||
| 			if (ctx->callback.after_aggregation && | 			if (ctx->callback.after_aggregation && | ||||||
| 					ctx->callback.after_aggregation(ctx)) | 					ctx->callback.after_aggregation(ctx)) | ||||||
| 				set_kdamond_stop(ctx); | 				done = true; | ||||||
| 			kdamond_apply_schemes(ctx); | 			kdamond_apply_schemes(ctx); | ||||||
| 			kdamond_reset_aggregated(ctx); | 			kdamond_reset_aggregated(ctx); | ||||||
| 			kdamond_split_regions(ctx); | 			kdamond_split_regions(ctx); | ||||||
|  | @ -1088,9 +1068,8 @@ static int kdamond_fn(void *data) | ||||||
| 			damon_destroy_region(r, t); | 			damon_destroy_region(r, t); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (ctx->callback.before_terminate && | 	if (ctx->callback.before_terminate) | ||||||
| 			ctx->callback.before_terminate(ctx)) | 		ctx->callback.before_terminate(ctx); | ||||||
| 		set_kdamond_stop(ctx); |  | ||||||
| 	if (ctx->primitive.cleanup) | 	if (ctx->primitive.cleanup) | ||||||
| 		ctx->primitive.cleanup(ctx); | 		ctx->primitive.cleanup(ctx); | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Changbin Du
						Changbin Du