perf: Add pmu callbacks to track event mapping and unmapping
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/266afcba1d1f91ea5501e4e16e94bbbc1a9339b6.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 22c4bd9fa9
commit 1e0fb9ec67

2 changed files with 16 additions and 0 deletions
include/linux/perf_event.h

@@ -202,6 +202,13 @@ struct pmu {
 	 */
 	int (*event_init)		(struct perf_event *event);
 
+	/*
+	 * Notification that the event was mapped or unmapped.  Called
+	 * in the context of the mapping task.
+	 */
+	void (*event_mapped)		(struct perf_event *event); /*optional*/
+	void (*event_unmapped)		(struct perf_event *event); /*optional*/
+
 #define PERF_EF_START	0x01		/* start the counter when adding    */
 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
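As a reading aid only (not part of this commit): a minimal sketch of how a PMU driver might wire up the new hooks. All example_* names are hypothetical, and the mandatory struct pmu callbacks (event_init, add, del, start, stop, read) are omitted for brevity.

	/* Hypothetical driver-side sketch; example_* names are illustrative only. */
	#include <linux/perf_event.h>
	#include <linux/atomic.h>

	/* Track how many ring-buffer mappings of our events currently exist. */
	static atomic_t example_active_mappings = ATOMIC_INIT(0);

	/* Runs from perf_mmap()/perf_mmap_open(), in the mapping task's context. */
	static void example_event_mapped(struct perf_event *event)
	{
		atomic_inc(&example_active_mappings);
	}

	/* Runs from perf_mmap_close(), also in the mapping task's context. */
	static void example_event_unmapped(struct perf_event *event)
	{
		atomic_dec(&example_active_mappings);
	}

	static struct pmu example_pmu = {
		/* ... mandatory callbacks (event_init, add, del, ...) omitted ... */
		.event_mapped	= example_event_mapped,		/* optional */
		.event_unmapped	= example_event_unmapped,	/* optional */
	};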
kernel/events/core.c

@@ -4293,6 +4293,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 
 	atomic_inc(&event->mmap_count);
 	atomic_inc(&event->rb->mmap_count);
+
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
 }
 
 /*
@@ -4312,6 +4315,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
 
+	if (event->pmu->event_unmapped)
+		event->pmu->event_unmapped(event);
+
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4513,6 +4519,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
+
 	return ret;
 }
 
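For orientation (not part of this commit): a minimal userspace sketch of the path that reaches these hooks. mmap() on a perf_event_open() fd ends up in perf_mmap(), and unmapping the buffer ends up in perf_mmap_close(); the event type and buffer size below are arbitrary choices and error handling is elided.

	/* Hypothetical userspace sketch: mapping and unmapping a perf ring buffer. */
	#include <linux/perf_event.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long page = sysconf(_SC_PAGESIZE);
		size_t len;
		void *buf;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* Error handling elided for brevity. */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

		/* Ring buffer is one header page plus 2^n data pages. */
		len = page * (1 + 8);
		buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		/* perf_mmap() has run; pmu->event_mapped() was called if set. */

		munmap(buf, len);
		/* perf_mmap_close() runs on unmap; pmu->event_unmapped() if set. */

		close(fd);
		return 0;
	}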