	perf/core: Fix WARN in perf_cgroup_switch()
perf_cgroup_switch() and perf_cgroup_event_disable() can race. Consider
the following scenario: after a new perf cgroup event is created on
CPU0, the new event may not trigger a reprogramming, leaving
ctx->is_active as 0. In this case, when CPU1 disables this perf event,
it executes __perf_remove_from_context()->list_del_event()->
perf_cgroup_event_disable() on CPU1, which races with
perf_cgroup_switch() running on CPU0.
The following describes the details of this concurrency scenario:
CPU0						CPU1
perf_cgroup_switch:
   ...
   # cpuctx->cgrp is not NULL here
   if (READ_ONCE(cpuctx->cgrp) == NULL)
   	return;
						perf_remove_from_context:
						   ...
						   raw_spin_lock_irq(&ctx->lock);
						   ...
						   # ctx->is_active == 0 because reprogramming is not
						   # triggered, so CPU1 can do __perf_remove_from_context
						   # for CPU0
						   __perf_remove_from_context:
						         perf_cgroup_event_disable:
							    ...
							    if (--ctx->nr_cgroups)
							    ...
   # this warning will happen because CPU1 changed
   # ctx.nr_cgroups to 0.
   WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
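The shape of the fix is the classic check / lock / re-check pattern: keep the
unlocked fast path, but re-validate cpuctx->cgrp once ctx->lock is held. Below
is a minimal userspace sketch of that pattern; the names (cgrp_state,
switch_path, remove_path) and the pthread locking are hypothetical stand-ins,
and only the check/lock/re-check shape mirrors the kernel code. Note the goto
unlock here is exactly what the guard() conversion mentioned next removes.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cgrp_state = &ctx_lock;	/* stands in for cpuctx->cgrp */
static int nr_cgroups = 1;		/* stands in for cpuctx->ctx.nr_cgroups */

static void switch_path(void)
{
	/* Unlocked fast path, as in perf_cgroup_switch(). */
	if (__atomic_load_n(&cgrp_state, __ATOMIC_RELAXED) == NULL)
		return;

	pthread_mutex_lock(&ctx_lock);
	/*
	 * Re-check under the lock: remove_path() may have cleared the
	 * state between the fast-path check and the lock acquisition.
	 * Without this re-check, the warning below is the analogue of
	 * the WARN_ON_ONCE() firing.
	 */
	if (__atomic_load_n(&cgrp_state, __ATOMIC_RELAXED) == NULL)
		goto unlock;

	if (nr_cgroups == 0)
		fprintf(stderr, "WARN: nr_cgroups == 0\n");
unlock:
	pthread_mutex_unlock(&ctx_lock);
}

static void remove_path(void)
{
	/* CPU1's side: clears the state while holding the lock. */
	pthread_mutex_lock(&ctx_lock);
	nr_cgroups--;
	__atomic_store_n(&cgrp_state, NULL, __ATOMIC_RELAXED);
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	remove_path();	/* CPU1's half of the race */
	switch_path();	/* CPU0's half: the re-check makes it bail out */
	return 0;
}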
[peterz: use guard instead of goto unlock]
Fixes: db4a835601 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250604033924.3914647-3-luogengkun@huaweicloud.com
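The guard(perf_ctx_lock) conversion works because the kernel's guard() macro
from linux/cleanup.h expects a class_<name>_t type plus matching
class_<name>_constructor()/class_<name>_destructor() functions, which is
exactly what the first hunk below defines by hand for perf_ctx_lock. A rough
userspace analogue of that wiring, using the compiler's cleanup attribute, is
sketched here; guard, demo_lock and work() are hypothetical, and the macro is
a simplification of the real one (which generates unique variable names).

#include <pthread.h>
#include <stdio.h>

/* The triple the guard() machinery looks for: a class_<name>_t type plus
 * constructor/destructor, the same shape the patch defines for perf_ctx_lock. */
typedef struct { pthread_mutex_t *lock; } class_demo_lock_t;

/* Destructor runs automatically when the guard variable leaves scope. */
static inline void class_demo_lock_destructor(class_demo_lock_t *_T)
{ pthread_mutex_unlock(_T->lock); }

static inline class_demo_lock_t class_demo_lock_constructor(pthread_mutex_t *lock)
{ pthread_mutex_lock(lock); return (class_demo_lock_t){ lock }; }

/* Simplified stand-in for guard() from linux/cleanup.h. */
#define guard(name) \
	class_##name##_t __guard __attribute__((cleanup(class_##name##_destructor))) = \
		class_##name##_constructor

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static int work(int bail_early)
{
	guard(demo_lock)(&m);

	if (bail_early)
		return -1;	/* destructor unlocks here: no goto unlock */

	puts("worked under the lock");
	return 0;		/* ... and here */
}

int main(void)
{
	work(1);
	work(0);
	return 0;
}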
			
			
parent 3b7a34aebb
commit 3172fb9866

1 changed file with 20 additions and 2 deletions
kernel/events/core.c
@@ -207,6 +207,19 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
 	__perf_ctx_unlock(&cpuctx->ctx);
 }
 
+typedef struct {
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+} class_perf_ctx_lock_t;
+
+static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
+{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
+
+static inline class_perf_ctx_lock_t
+class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx)
+{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
+
 #define TASK_TOMBSTONE ((void *)-1L)
 
 static bool is_kernel_event(struct perf_event *event)
@@ -944,7 +957,13 @@ static void perf_cgroup_switch(struct task_struct *task)
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;
 
-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+	/*
+	 * Re-check, could've raced vs perf_remove_from_context().
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;
+
 	perf_ctx_disable(&cpuctx->ctx, true);
 
 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
@@ -962,7 +981,6 @@ static void perf_cgroup_switch(struct task_struct *task)
 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
 
 	perf_ctx_enable(&cpuctx->ctx, true);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 static int perf_cgroup_ensure_storage(struct perf_event *event,