	Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit d652f4bbca

3 changed files with 63 additions and 19 deletions
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm.  Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write.  If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
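Note on the two x86 PMU hunks above (refresh_pce() / x86_pmu_event_mapped()): refresh_pce() is the cross-CPU callback that reloads CR4 on every CPU running this mm; switching it to current->active_mm also covers CPUs currently running a kernel thread, which has no mm of its own and only borrows active_mm. The new lockdep assertion documents that the CR4.PCE update relies on mmap_sem being held for write. What CR4.PCE ultimately gates is userspace reading counters directly with the RDPMC instruction after mmap()ing the event. As a hedged, simplified sketch (not part of this commit; it omits sign extension via pmc_width and time scaling), the read loop documented in include/uapi/linux/perf_event.h looks roughly like this:

	/*
	 * Userspace sketch: read a counter via RDPMC using the mmap()ed
	 * perf_event_mmap_page. Field names (lock, index, offset,
	 * cap_user_rdpmc) come from include/uapi/linux/perf_event.h.
	 */
	#include <stdint.h>
	#include <linux/perf_event.h>

	static inline uint64_t rdpmc(uint32_t counter)
	{
		uint32_t low, high;

		asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
		return low | ((uint64_t)high << 32);
	}

	static uint64_t read_counter(volatile struct perf_event_mmap_page *pc)
	{
		uint32_t seq, idx;
		int64_t count;

		do {
			seq = pc->lock;
			asm volatile("" ::: "memory");	/* pairs with the kernel's write side */

			idx = pc->index;		/* 0 means not usable on this CPU */
			count = pc->offset;
			if (pc->cap_user_rdpmc && idx)
				count += rdpmc(idx - 1);	/* needs CR4.PCE, managed by the hunks above */

			asm volatile("" ::: "memory");
		} while (pc->lock != seq);

		return (uint64_t)count;
	}

mmap()ing an RDPMC-capable event is what invokes x86_pmu_event_mapped(), and the first such mapping in an mm triggers the on_each_cpu_mask(..., refresh_pce, ...) call that sets CR4.PCE. The hunks that follow touch the core perf event code (perf_event_release_kernel(), perf_event_free_task(), the inherit_*() paths) and, at the end, perf's userspace symbol handling.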
@@ -4261,7 +4261,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
 	raw_spin_lock_irq(&ctx->lock);
 	/*
-	 * Mark this even as STATE_DEAD, there is no external reference to it
+	 * Mark this event as STATE_DEAD, there is no external reference to it
 	 * anymore.
 	 *
 	 * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10556,21 +10556,22 @@ void perf_event_free_task(struct task_struct *task)
 			continue;
 
 		mutex_lock(&ctx->mutex);
-again:
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-				group_entry)
-			perf_free_event(event, ctx);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 
-		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-				group_entry)
+		list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
 			perf_free_event(event, ctx);
 
-		if (!list_empty(&ctx->pinned_groups) ||
-				!list_empty(&ctx->flexible_groups))
-			goto again;
-
 		mutex_unlock(&ctx->mutex);
-
 		put_ctx(ctx);
 	}
 }
@@ -10608,7 +10609,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit a event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10702,6 +10708,16 @@ inherit_event(struct perf_event *parent_event,
 	return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
 	      struct task_struct *parent,
 	      struct perf_event_context *parent_ctx,
@@ -10716,6 +10732,11 @@ static int inherit_group(struct perf_event *parent_event,
 				 child, NULL, child_ctx);
 	if (IS_ERR(leader))
 		return PTR_ERR(leader);
+	/*
+	 * @leader can be NULL here because of is_orphaned_event(). In this
+	 * case inherit_event() will create individual events, similar to what
+	 * perf_group_detach() would do anyway.
+	 */
 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					    child, leader, child_ctx);
@@ -10725,6 +10746,17 @@ static int inherit_group(struct perf_event *parent_event,
 	return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
@@ -10747,7 +10779,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		 * First allocate and initialize a context for the
 		 * child.
 		 */
-
 		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 		if (!child_ctx)
 			return -ENOMEM;
@@ -10809,7 +10840,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	/*
@@ -10825,7 +10856,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10853,6 +10884,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	}
 
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
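Note on the perf tooling hunk above (symbols__fixup_end()): the last symbol in a table has no successor to derive an end address from, so it gets a synthetic one. With the old expression, a last symbol whose start is already 4 KiB aligned ended up with end == start, i.e. a zero-sized symbol that no sample address can ever fall into; the fix always extends it at least one page past the start. A small standalone illustration of the arithmetic (the address below is hypothetical; roundup() is written out with its conventional definition):

	#include <stdio.h>
	#include <stdint.h>

	/* Conventional roundup(), as used in the perf sources. */
	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		uint64_t start = 0xffffffff81a00000ULL;		/* already 4 KiB aligned */

		uint64_t old_end = roundup(start, 4096);	/* == start -> zero-sized last symbol */
		uint64_t new_end = roundup(start, 4096) + 4096;	/* always at least one page long */

		printf("start   %#llx\n", (unsigned long long)start);
		printf("old end %#llx (size %llu)\n",
		       (unsigned long long)old_end, (unsigned long long)(old_end - start));
		printf("new end %#llx (size %llu)\n",
		       (unsigned long long)new_end, (unsigned long long)(new_end - start));
		return 0;
	}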