kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic

optimize_kprobe() and unoptimize_kprobe() cancel the operation if the
given kprobe is already on the optimizing_list or unoptimizing_list.
However, since the following commit:

  f66c0447cc ("kprobes: Set unoptimized flag after unoptimizing code")

changed the update timing of KPROBE_FLAG_OPTIMIZED, this no longer works
as expected.

An optimized_kprobe can be in one of the following states:

- [optimizing]: before the jump instruction is inserted.
  op->kp.flags has KPROBE_FLAG_OPTIMIZED set and op->list is not empty.

- [optimized]: the jump instruction has been inserted.
  op->kp.flags has KPROBE_FLAG_OPTIMIZED set and op->list is empty.

- [unoptimizing]: before the jump instruction is removed (this includes
  unused optprobes).
  op->kp.flags has KPROBE_FLAG_OPTIMIZED set and op->list is not empty.

- [unoptimized]: the jump instruction has been removed.
  op->kp.flags does not have KPROBE_FLAG_OPTIMIZED set and op->list is
  empty.

The current code wrongly assumes that the [unoptimizing] state does not
have KPROBE_FLAG_OPTIMIZED set, which can lead to incorrect results.

To fix this, introduce optprobe_queued_unopt() to distinguish the
[optimizing] and [unoptimizing] states, and fix the logic in
optimize_kprobe() and unoptimize_kprobe().

[ mingo: Cleaned up the changelog and the code a bit. ]

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bristot@redhat.com
Fixes: f66c0447cc ("kprobes: Set unoptimized flag after unoptimizing code")
Link: https://lkml.kernel.org/r/157840814418.7181.13478003006386303481.stgit@devnote2
Signed-off-by: Ingo Molnar <mingo@kernel.org>
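Not part of the commit: a minimal sketch of how the four states from the
changelog map onto the two conditions the code can actually observe.
kprobe_opt_state() is a hypothetical helper name used only for
illustration; optprobe_queued_unopt() is the helper this patch introduces
(see the diff below).

/*
 * Illustration only, not from the patch: classify an optimized_kprobe
 * into the four states described in the changelog.
 */
static const char *kprobe_opt_state(struct optimized_kprobe *op)
{
	bool optimized = op->kp.flags & KPROBE_FLAG_OPTIMIZED;
	bool queued = !list_empty(&op->list);

	if (optimized && queued)
		/*
		 * Both queued states carry the OPTIMIZED flag, so only a
		 * list membership check can tell them apart.
		 */
		return optprobe_queued_unopt(op) ? "unoptimizing" : "optimizing";
	if (optimized)
		return "optimized";	/* jump inserted, nothing queued */
	return "unoptimized";		/* jump removed, nothing queued */
}

The old code effectively collapsed [unoptimizing] into [optimized]
because it only looked at the flag.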
commit e4add24778 (parent 2040cf9f59)
1 changed file with 43 additions and 24 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+	struct optimized_kprobe *_op;
+
+	list_for_each_entry(_op, &unoptimizing_list, list) {
+		if (op == _op)
+			return true;
+	}
+
+	return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,18 +645,22 @@ static void optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* Check if it is already optimized. */
-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+		if (optprobe_queued_unopt(op)) {
+			/* This is under unoptimizing. Just dequeue the probe */
+			list_del_init(&op->list);
+		}
 		return;
+	}
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-	if (!list_empty(&op->list))
-		/* This is under unoptimizing. Just dequeue the probe */
-		list_del_init(&op->list);
-	else {
-		list_add(&op->list, &optimizing_list);
-		kick_kprobe_optimizer();
-	}
+	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+	if (WARN_ON_ONCE(!list_empty(&op->list)))
+		return;
+
+	list_add(&op->list, &optimizing_list);
+	kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return; /* This is not an optprobe nor optimized */
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!kprobe_optimized(p)) {
-		/* Unoptimized or unoptimizing case */
-		if (force && !list_empty(&op->list)) {
-			/*
-			 * Only if this is unoptimizing kprobe and forced,
-			 * forcibly unoptimize it. (No need to unoptimize
-			 * unoptimized kprobe again :)
-			 */
-			list_del_init(&op->list);
-			force_unoptimize_kprobe(op);
-		}
-		return;
-	}
+	if (!kprobe_optimized(p))
+		return;
 
 	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
-		list_del_init(&op->list);
+		if (optprobe_queued_unopt(op)) {
+			/* Queued in unoptimizing queue */
+			if (force) {
+				/*
+				 * Forcibly unoptimize the kprobe here, and queue it
+				 * in the freeing list for release afterwards.
+				 */
+				force_unoptimize_kprobe(op);
+				list_move(&op->list, &freeing_list);
+			}
+		} else {
+			/* Dequeue from the optimizing queue */
+			list_del_init(&op->list);
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		}
 		return;
 	}
+
 	/* Optimized kprobe case */
-	if (force)
+	if (force) {
 		/* Forcibly update the code: this is a special case */
 		force_unoptimize_kprobe(op);
-	else {
+	} else {
 		list_add(&op->list, &unoptimizing_list);
 		kick_kprobe_optimizer();
 	}
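An illustrative recap of the fixed cancellation logic, not taken verbatim
from the patch: when a probe is found queued on one of the lists, the two
functions now dequeue it in opposite directions. 'op' is assumed here to
be queued on exactly one list.

if (optprobe_queued_unopt(op)) {
	/*
	 * optimize_kprobe(): the probe was waiting to be unoptimized;
	 * dequeueing it cancels that, and the code stays optimized.
	 */
	list_del_init(&op->list);
} else {
	/*
	 * unoptimize_kprobe(): the probe was waiting to be optimized;
	 * dequeue it and clear the flag, since no jump was ever inserted.
	 */
	list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}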