mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	perf: Add a few assertions
While auditing 6b959ba22d ("perf/core: Fix reentry problem in
perf_output_read_group()") a few spots were found that wanted
assertions.
Notably, for_each_sibling_event() relies on exclusion from
modification. This would normally be holding either ctx->lock or
ctx->mutex, however due to how things are constructed disabling IRQs
is a valid and sufficient substitute for ctx->lock.
Another possible site to add assertions would be the various
pmu::{add,del,read,..}() methods, but that's not trivially expressible
in C -- the best option is wrappers, but those are easy enough to
forget.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
			
			
This commit is contained in:
		
							parent
							
								
									88081cfb69
								
							
						
					
					
						commit
						f3c0eba287
					
				
					 2 changed files with 19 additions and 0 deletions
				
			
		| 
						 | 
				
			
			@ -61,6 +61,7 @@ struct perf_guest_info_callbacks {
 | 
			
		|||
#include <linux/refcount.h>
 | 
			
		||||
#include <linux/security.h>
 | 
			
		||||
#include <linux/static_call.h>
 | 
			
		||||
#include <linux/lockdep.h>
 | 
			
		||||
#include <asm/local.h>
 | 
			
		||||
 | 
			
		||||
struct perf_callchain_entry {
 | 
			
		||||
| 
						 | 
				
			
			@ -634,7 +635,23 @@ struct pmu_event_list {
 | 
			
		|||
	struct list_head	list;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
/*
 * event->sibling_list is modified while holding both ctx->lock and ctx->mutex
 * as such iteration must hold either lock. However, since ctx->lock is an IRQ
 * safe lock, and is only held by the CPU doing the modification, having IRQs
 * disabled is sufficient since it will hold-off the IPIs.
 */
#ifdef CONFIG_PROVE_LOCKING
/*
 * Assert that the caller holds either ctx->mutex or has IRQs disabled
 * (a sufficient substitute for ctx->lock, per the comment above), so that
 * iterating event->sibling_list is safe against concurrent modification.
 */
#define lockdep_assert_event_ctx(event)				\
	WARN_ON_ONCE(__lockdep_enabled &&			\
		     (this_cpu_read(hardirqs_enabled) ||	\
		      lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
#else
#define lockdep_assert_event_ctx(event)
#endif

/*
 * Iterate the siblings of a group leader; only group leaders have a
 * populated sibling_list, hence the guard on group_leader == event.
 */
#define for_each_sibling_event(sibling, event)			\
	lockdep_assert_event_ctx(event);			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1468,6 +1468,8 @@ static void __update_context_time(struct perf_event_context *ctx, bool adv)
 | 
			
		|||
{
 | 
			
		||||
	u64 now = perf_clock();
 | 
			
		||||
 | 
			
		||||
	lockdep_assert_held(&ctx->lock);
 | 
			
		||||
 | 
			
		||||
	if (adv)
 | 
			
		||||
		ctx->time += now - ctx->timestamp;
 | 
			
		||||
	ctx->timestamp = now;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue