perf: Introduce perf_pmu_migrate_context()

Originally from Peter Zijlstra. The helper migrates perf events from one
cpu to another cpu.

Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1339741902-8449-5-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0cda4c0231
parent e2d37cd213

2 changed files with 38 additions and 0 deletions
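Usage note (not part of this commit): a minimal sketch of how a caller might use the new helper when the CPU hosting its events goes offline. The names my_pmu and my_pmu_cpu and the hotplug hook are illustrative assumptions, not code from this change; the hotplug wiring is omitted.

#include <linux/cpumask.h>
#include <linux/perf_event.h>

/* Illustrative driver state: a PMU and the CPU currently hosting its events. */
static struct pmu my_pmu;		/* hypothetical, callbacks not shown */
static unsigned int my_pmu_cpu;

/* Hypothetical hook, called when dying_cpu is going offline. */
static void my_pmu_cpu_offline(unsigned int dying_cpu)
{
	unsigned int target;

	if (dying_cpu != my_pmu_cpu)
		return;

	/* Pick any other online CPU to take over the events. */
	target = cpumask_any_but(cpu_online_mask, dying_cpu);
	if (target >= nr_cpu_ids)
		return;

	/* Re-home the events; the helper takes both context mutexes itself. */
	perf_pmu_migrate_context(&my_pmu, dying_cpu, target);
	my_pmu_cpu = target;
}

As the new core.c code below shows, the helper empties the source CPU context under its mutex, waits for an RCU grace period, and then re-installs the events into the destination CPU context.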
				
			
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1107,6 +1107,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				struct task_struct *task,
 				perf_overflow_handler_t callback,
 				void *context);
+extern void perf_pmu_migrate_context(struct pmu *pmu,
+				int src_cpu, int dst_cpu);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1645,6 +1645,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->mutex);
 
 	event->ctx = ctx;
+	if (event->cpu != -1)
+		event->cpu = cpu;
 
 	if (!task) {
 		/*
@@ -6379,6 +6381,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_lock(&ctx->mutex);
 
 	if (move_group) {
+		synchronize_rcu();
 		perf_install_in_context(ctx, group_leader, event->cpu);
 		get_ctx(ctx);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -6484,6 +6487,39 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
+void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+{
+	struct perf_event_context *src_ctx;
+	struct perf_event_context *dst_ctx;
+	struct perf_event *event, *tmp;
+	LIST_HEAD(events);
+
+	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
+	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
+
+	mutex_lock(&src_ctx->mutex);
+	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+				 event_entry) {
+		perf_remove_from_context(event);
+		put_ctx(src_ctx);
+		list_add(&event->event_entry, &events);
+	}
+	mutex_unlock(&src_ctx->mutex);
+
+	synchronize_rcu();
+
+	mutex_lock(&dst_ctx->mutex);
+	list_for_each_entry_safe(event, tmp, &events, event_entry) {
+		list_del(&event->event_entry);
+		if (event->state >= PERF_EVENT_STATE_OFF)
+			event->state = PERF_EVENT_STATE_INACTIVE;
+		perf_install_in_context(dst_ctx, event, dst_cpu);
+		get_ctx(dst_ctx);
+	}
+	mutex_unlock(&dst_ctx->mutex);
+}
+EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+
 static void sync_child_event(struct perf_event *child_event,
 			       struct task_struct *child)
 {