mirror of https://github.com/torvalds/linux.git
perf/x86/intel: Make cpuc allocations consistent

The cpuc data structure allocation differs between fake and real cpucs; use the same code to init/free both.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:

parent 1c163f4c7b
commit d01b1f96a8

3 changed files with 31 additions and 22 deletions
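In outline, the change moves all Intel-specific cpuc setup and teardown into a single intel_cpuc_prepare()/intel_cpuc_finish() pair, then routes both the real per-CPU cpucs (via the CPU hotplug callbacks) and the fake cpucs used for event validation through that pair. A condensed sketch of the resulting call structure, assembled from the diff below:

        /* intel/core.c: one init/free pair for everything hanging off a cpuc */
        int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
        void intel_cpuc_finish(struct cpu_hw_events *cpuc);

        /* real cpucs: the hotplug callbacks become thin wrappers */
        static int intel_pmu_cpu_prepare(int cpu)
        {
                return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
        }

        static void intel_pmu_cpu_dead(int cpu)
        {
                intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
        }

        /* fake cpucs (core.c): the same pair replaces the open-coded alloc/free */
        static void free_fake_cpuc(struct cpu_hw_events *cpuc)
        {
                intel_cpuc_finish(cpuc);
                kfree(cpuc);
        }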
				
			
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
  */
 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
 {
-        kfree(cpuc->shared_regs);
+        intel_cpuc_finish(cpuc);
         kfree(cpuc);
 }
 
@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
         if (!cpuc)
                 return ERR_PTR(-ENOMEM);
-
-        /* only needed, if we have extra_regs */
-        if (x86_pmu.extra_regs) {
-                cpuc->shared_regs = allocate_shared_regs(cpu);
-                if (!cpuc->shared_regs)
-                        goto error;
-        }
         cpuc->is_fake = 1;
+
+        if (intel_cpuc_prepare(cpuc, cpu))
+                goto error;
+
         return cpuc;
 error:
         free_fake_cpuc(cpuc);
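For context on the file above: fake cpucs are the throwaway cpu_hw_events that core.c builds when validating whether a new event or group can be scheduled at all. The callers look roughly like this (a condensed sketch of the surrounding, unchanged code; the scheduling dry-run itself is elided):

        static int validate_group(struct perf_event *event)
        {
                struct cpu_hw_events *fake_cpuc;
                int ret = -EINVAL;

                fake_cpuc = allocate_fake_cpuc();
                if (IS_ERR(fake_cpuc))
                        return PTR_ERR(fake_cpuc);
                /* ... collect the group's events and dry-run the scheduler ... */
                free_fake_cpuc(fake_cpuc);
                return ret;
        }

Previously allocate_fake_cpuc() set up only cpuc->shared_regs, and only when x86_pmu.extra_regs was present. Going through intel_cpuc_prepare() gives a fake cpuc the same allocations as a real one (shared_regs, constraint_list and the exclusive-counter state), so validation exercises the same data structures that scheduling will later use.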
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3398,7 +3398,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
         return x86_event_sysfs_show(page, config, event);
 }
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
         struct intel_shared_regs *regs;
         int i;
@@ -3430,10 +3430,9 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
         return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
         if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                 cpuc->shared_regs = allocate_shared_regs(cpu);
                 if (!cpuc->shared_regs)
@@ -3443,7 +3442,7 @@ static int intel_pmu_cpu_prepare(int cpu)
         if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                 if (!cpuc->constraint_list)
                         goto err_shared_regs;
 
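A small improvement rides along in the last hunk above: the constraint list is now allocated with kzalloc_node() on the target CPU's home NUMA node rather than on whichever node the preparing CPU happens to run. The idiom in isolation (the helper name is made up for illustration):

        #include <linux/slab.h>
        #include <linux/topology.h>     /* cpu_to_node() */

        /*
         * Zeroed, NUMA-local allocation for data owned by one CPU.
         * GFP_KERNEL may sleep, so call this from process context
         * (such as a hotplug prepare callback), never from IRQ context.
         */
        static void *alloc_cpu_local(size_t size, int cpu)
        {
                return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
        }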
@@ -3468,6 +3467,11 @@ static int intel_pmu_cpu_prepare(int cpu)
         return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+        return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
 static void flip_smm_bit(void *data)
 {
         unsigned long set = *(unsigned long *)data;
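The new wrapper keeps the existing hotplug entry point intact: intel_pmu_cpu_prepare() remains what the PMU description structure points at. Sketched for orientation (surrounding code, not part of this diff):

        /* elsewhere in intel/core.c, unchanged by this patch */
        static __initconst const struct x86_pmu intel_pmu = {
                /* ... */
                .cpu_prepare            = intel_pmu_cpu_prepare,
                .cpu_starting           = intel_pmu_cpu_starting,
                .cpu_dying              = intel_pmu_cpu_dying,
                .cpu_dead               = intel_pmu_cpu_dead,
                /* ... */
        };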
@@ -3542,9 +3546,8 @@ static void intel_pmu_cpu_starting(int cpu)
         }
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
         struct intel_excl_cntrs *c;
 
         c = cpuc->excl_cntrs;
@@ -3565,9 +3568,8 @@ static void intel_pmu_cpu_dying(int cpu)
                 disable_counter_freeze();
 }
 
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
         struct intel_shared_regs *pc;
 
         pc = cpuc->shared_regs;
@@ -3577,7 +3579,12 @@ static void intel_pmu_cpu_dead(int cpu)
                 cpuc->shared_regs = NULL;
         }
 
-        free_excl_cntrs(cpu);
+        free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+        intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4715,7 +4722,7 @@ static __init int fixup_ht_bug(void)
         hardlockup_detector_perf_restart();
 
         for_each_online_cpu(c)
-                free_excl_cntrs(c);
+                free_excl_cntrs(&per_cpu(cpu_hw_events, c));
 
         cpus_read_unlock();
         pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
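intel_cpuc_prepare() keeps the kernel's usual unwind-label error handling, visible in the goto err_shared_regs above: each failed allocation jumps to a label that releases whatever was allocated before it. The pattern in generic form (struct and names hypothetical, not from this file):

        #include <linux/slab.h>

        struct things {
                int *a;
                int *b;
        };

        static int prepare_things(struct things *t, int node)
        {
                t->a = kzalloc_node(sizeof(*t->a), GFP_KERNEL, node);
                if (!t->a)
                        goto err;

                t->b = kzalloc_node(sizeof(*t->b), GFP_KERNEL, node);
                if (!t->b)
                        goto err_a;     /* undo only what already succeeded */

                return 0;

        err_a:
                kfree(t->a);
                t->a = NULL;
        err:
                return -ENOMEM;
        }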
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -889,7 +889,8 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                           struct perf_event *event);
 
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
 
 int intel_pmu_init(void);
 
@@ -1025,9 +1026,13 @@ static inline int intel_pmu_init(void)
         return 0;
 }
 
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
+{
+        return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
 {
-        return NULL;
 }
 
 static inline int is_ht_workaround_enabled(void)
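The second hunk above sits in the !CONFIG_CPU_SUP_INTEL branch of perf_event.h: on builds without the Intel driver the helpers collapse into empty static inlines, so callers in the generic x86 code need no #ifdefs. Condensed, the header shape is as below; note that the stubs in the patch itself spell the parameter type struct cpu_hw_event, without the trailing 's' (a typo corrected upstream later), while this sketch uses the real type name:

        #ifdef CONFIG_CPU_SUP_INTEL

        extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
        extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

        #else /* !CONFIG_CPU_SUP_INTEL */

        static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
        {
                return 0;       /* nothing to allocate without the Intel driver */
        }

        static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
        {
        }

        #endif /* CONFIG_CPU_SUP_INTEL */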