mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	perf/x86/intel: Implement support for TSX Force Abort
Skylake (and later) will receive a microcode update to address a TSX erratum. This microcode will, on execution of a TSX instruction (speculative or not) use (clobber) PMC3. This update will also provide a new MSR to change this behaviour along with a CPUID bit to enumerate the presence of this new MSR. When the MSR gets set, the microcode will no longer use PMC3 but will Force Abort every TSX transaction (upon executing COMMIT). When TSX Force Abort (TFA) is allowed (default), the MSR gets set when PMC3 gets scheduled and cleared when, after scheduling, PMC3 is unused. When TFA is not allowed, clear PMC3 from all constraints such that it will not get used. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
		
							parent
							
								
									52f6490940
								
							
						
					
					
						commit
						400816f60c
					
				
					 2 changed files with 77 additions and 3 deletions
				
			
		| 
						 | 
				
			
			@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int added)
 | 
			
		|||
	intel_pmu_enable_all(added);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 * Program the TSX_FORCE_ABORT MSR to @on/off for this CPU.
 *
 * cpuc->tfa_shadow caches the last value written, so the (relatively
 * expensive) WRMSR is only issued when the requested state actually
 * changes — this runs on the PMU enable/scheduling hot path.
 */
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

	if (cpuc->tfa_shadow != val) {
		/* Update the shadow first so it always mirrors the MSR. */
		cpuc->tfa_shadow = val;
		wrmsrl(MSR_TSX_FORCE_ABORT, val);
	}
}
 | 
			
		||||
 | 
			
		||||
/*
 * Scheduling hook: called when counter @cntr has been committed for
 * @event. If the scheduler just claimed PMC3, force-abort mode must be
 * enabled before the counter is touched (the microcode otherwise
 * clobbers PMC3 for its own use).
 */
static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	/*
	 * We're going to use PMC3, make sure TFA is set before we touch it.
	 * Skip fake cpuc's used for constraint validation — they never
	 * program real hardware.
	 */
	if (cntr == 3 && !cpuc->is_fake)
		intel_set_tfa(cpuc, true);
}
 | 
			
		||||
 | 
			
		||||
/*
 * TFA-aware replacement for x86_pmu.enable_all (installed on CPUs with
 * X86_FEATURE_TSX_FORCE_ABORT). Drops force-abort mode when PMC3 is no
 * longer scheduled, then performs the normal enable.
 */
static void intel_tfa_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * If we find PMC3 is no longer used when we enable the PMU, we can
	 * clear TFA.
	 */
	if (!test_bit(3, cpuc->active_mask))
		intel_set_tfa(cpuc, false);

	intel_pmu_enable_all(added);
}
 | 
			
		||||
 | 
			
		||||
static void enable_counter_freeze(void)
 | 
			
		||||
{
 | 
			
		||||
	update_debugctlmsr(get_debugctlmsr() |
 | 
			
		||||
| 
						 | 
				
			
			@ -3354,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 | 
			
		|||
	return c;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 * When true (the default), the microcode may set TSX_FORCE_ABORT so that
 * perf can use PMC3; when false, PMC3 is withheld from scheduling instead.
 * Exposed as a writable sysfs attribute via DEVICE_BOOL_ATTR.
 */
static bool allow_tsx_force_abort = true;

/*
 * TFA-aware replacement for x86_pmu.get_event_constraints. Builds on the
 * Haswell constraints, then removes PMC3 from the candidate counter mask
 * when the administrator has disallowed TSX force-abort.
 */
static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);

	/*
	 * Without TFA we must not use PMC3.
	 */
	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
		/*
		 * Copy into a per-cpu dynamic constraint so the shared,
		 * possibly read-only template is not modified.
		 */
		c = dyn_constraint(cpuc, c, idx);
		c->idxmsk64 &= ~(1ULL << 3);
		c->weight--;
	}

	return c;
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Broadwell:
 | 
			
		||||
 *
 | 
			
		||||
| 
						 | 
				
			
			@ -3448,13 +3501,15 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 | 
			
		|||
			goto err;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 | 
			
		||||
	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
 | 
			
		||||
		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 | 
			
		||||
 | 
			
		||||
		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
 | 
			
		||||
		if (!cpuc->constraint_list)
 | 
			
		||||
			goto err_shared_regs;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 | 
			
		||||
		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
 | 
			
		||||
		if (!cpuc->excl_cntrs)
 | 
			
		||||
			goto err_constraint_list;
 | 
			
		||||
| 
						 | 
				
			
			@ -3564,9 +3619,10 @@ static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 | 
			
		|||
		if (c->core_id == -1 || --c->refcnt == 0)
 | 
			
		||||
			kfree(c);
 | 
			
		||||
		cpuc->excl_cntrs = NULL;
 | 
			
		||||
		kfree(cpuc->constraint_list);
 | 
			
		||||
		cpuc->constraint_list = NULL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	kfree(cpuc->constraint_list);
 | 
			
		||||
	cpuc->constraint_list = NULL;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void intel_pmu_cpu_dying(int cpu)
 | 
			
		||||
| 
						 | 
				
			
			@ -4086,8 +4142,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
 | 
			
		|||
       NULL
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
/* sysfs boolean knob backing the allow_tsx_force_abort module state. */
DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);

static struct attribute *intel_pmu_attrs[] = {
	&dev_attr_freeze_on_smi.attr,
	/*
	 * Slot [1] is a placeholder; intel_pmu_init() fills it with
	 * &dev_attr_allow_tsx_force_abort.attr.attr when the CPU has
	 * X86_FEATURE_TSX_FORCE_ABORT, so the attribute is only visible
	 * on affected parts.
	 */
	NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
	NULL,
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -4580,6 +4639,15 @@ __init int intel_pmu_init(void)
 | 
			
		|||
		tsx_attr = hsw_tsx_events_attrs;
 | 
			
		||||
		intel_pmu_pebs_data_source_skl(
 | 
			
		||||
			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
 | 
			
		||||
 | 
			
		||||
		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
 | 
			
		||||
			x86_pmu.flags |= PMU_FL_TFA;
 | 
			
		||||
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
 | 
			
		||||
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
 | 
			
		||||
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
 | 
			
		||||
			intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		pr_cont("Skylake events, ");
 | 
			
		||||
		name = "skylake";
 | 
			
		||||
		break;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -242,6 +242,11 @@ struct cpu_hw_events {
 | 
			
		|||
	struct intel_excl_cntrs		*excl_cntrs;
 | 
			
		||||
	int excl_thread_id; /* 0 or 1 */
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * SKL TSX_FORCE_ABORT shadow
 | 
			
		||||
	 */
 | 
			
		||||
	u64				tfa_shadow;
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * AMD specific bits
 | 
			
		||||
	 */
 | 
			
		||||
| 
						 | 
				
			
			@ -681,6 +686,7 @@ do {									\
 | 
			
		|||
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
 | 
			
		||||
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
 | 
			
		||||
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
 | 
			
		||||
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
 | 
			
		||||
 | 
			
		||||
#define EVENT_VAR(_id)  event_attr_##_id
 | 
			
		||||
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue