	kprobes: fix wait_for_kprobe_optimizer()
wait_for_kprobe_optimizer() seems largely broken. It uses optimizer_comp
which is never re-initialized, so wait_for_kprobe_optimizer() will never
wait for anything once kprobe_optimizer() finishes all pending jobs for
the first time. Also, aside from completion, delayed_work_pending() is
%false once kprobe_optimizer() starts execution and
wait_for_kprobe_optimizer() won't wait for it.

Reimplement it so that it flushes optimizing_work until
[un]optimizing_lists are empty. Note that this also makes
optimizing_work execute immediately if someone's waiting for it, which
is the nicer behavior.

Only compile tested.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
parent 7c99e0bf86
commit ad72b3bea7

1 changed file with 15 additions and 8 deletions
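The root of the first problem described in the message is that a completion is one-shot state, not a level-triggered condition: once complete_all() has run, the completion stays "done" until it is explicitly re-initialized. A minimal sketch of the pitfall (illustrative only; the demo_* names are hypothetical and not from the patch):

#include <linux/completion.h>

static DECLARE_COMPLETION(demo_comp);

static void demo_finish_work(void)
{
	complete_all(&demo_comp);		/* wakes every current waiter */
}

static void demo_wait_for_work(void)
{
	wait_for_completion(&demo_comp);	/* blocks only the first time */
	wait_for_completion(&demo_comp);	/* returns at once: stale "done" state */
	/* waiting again would require re-arming the completion first */
}

Since kprobes never re-arms optimizer_comp, every wait after the first complete_all() falls straight through, which is exactly the behavior the patch removes.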
				
			
kernel/kprobes.c

@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
-static DECLARE_COMPLETION(optimizer_comp);
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -552,7 +551,6 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
 /* Start optimizer after OPTIMIZE_DELAY passed */
 static __kprobes void kick_kprobe_optimizer(void)
 {
-	if (!delayed_work_pending(&optimizing_work))
-		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 }
 
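A note on the kick_kprobe_optimizer() hunk above: dropping the delayed_work_pending() test loses nothing, because schedule_delayed_work() itself refuses to re-queue a work item whose pending bit is already set. A hedged sketch (try_kick() is a hypothetical wrapper, not kernel code):

#include <linux/printk.h>
#include <linux/workqueue.h>

static void try_kick(struct delayed_work *dwork, unsigned long delay)
{
	/*
	 * schedule_delayed_work() test-and-sets the PENDING bit
	 * internally and returns false when the work was already
	 * queued, so an external pending check only widens the race
	 * between testing and queueing.
	 */
	if (!schedule_delayed_work(dwork, delay))
		pr_debug("optimizer work already pending\n");
}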
				
			
@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-	else
-		/* Wake up all waiters */
-		complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
 static __kprobes void wait_for_kprobe_optimizer(void)
 {
-	if (delayed_work_pending(&optimizing_work))
-		wait_for_completion(&optimizer_comp);
+	mutex_lock(&kprobe_mutex);
+
+	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
+		mutex_unlock(&kprobe_mutex);
+
+		/* this will also make optimizing_work execute immediately */
+		flush_delayed_work(&optimizing_work);
+		/* @optimizing_work might not have been queued yet, relax */
+		cpu_relax();
+
+		mutex_lock(&kprobe_mutex);
+	}
+
+	mutex_unlock(&kprobe_mutex);
 }
 
 /* Optimize kprobe if p is ready to be optimized */
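The rewritten waiter leans on flush_delayed_work() semantics: if the work is still sitting on its delay timer, the flush cancels the timer, queues the work for immediate execution, and then waits for it to finish — this is why the optimizer now runs right away whenever someone waits on it. The cpu_relax() covers the window where the lists are non-empty but kick_kprobe_optimizer() has not queued the work yet. A generalized sketch of the pattern (wait_until_drained() and its parameters are hypothetical, not from the patch):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* cpu_relax() */

static void wait_until_drained(struct mutex *lock, struct list_head *queue,
			       struct delayed_work *worker)
{
	mutex_lock(lock);
	while (!list_empty(queue)) {
		mutex_unlock(lock);
		/* pull a pending timer forward and wait for the work */
		flush_delayed_work(worker);
		/* the work may not have been queued yet; retry */
		cpu_relax();
		mutex_lock(lock);
	}
	mutex_unlock(lock);
}

Re-checking the lists after each flush, under the same mutex the worker takes, is what makes the loop race-free: the waiter only exits once it has observed both lists empty while holding kprobe_mutex.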
				
			
			