	perf evlist: Remove __evlist__add_default
__evlist__add_default() adds a cycles event to a typically empty evlist
and was extended for hybrid with evlist__add_default_hybrid(), as more
than one PMU was necessary. Rather than keep dedicated logic for the
cycles event, switch to parsing 'cycles:P', which handles wildcarding
the PMUs appropriately for hybrid.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-14-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
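
The replacement pattern is visible in the builtin-record.c, builtin-top.c
and evlist.c hunks below: an empty evlist is populated by parsing an event
string instead of hand-building an evsel. The parse_event() helper itself is
not part of the hunks shown here (util/python.c below only stubs it out), so
the following is a minimal sketch of what such a wrapper plausibly looks
like, assuming the parse_events()/parse_events_error API used elsewhere in
this tree; it is not the code added by this commit:

	/* Sketch only: wrap parse_events() with error-state setup,
	 * printing and teardown, returning the parse result. */
	static int parse_event(struct evlist *evlist, const char *str)
	{
		struct parse_events_error err;
		int ret;

		parse_events_error__init(&err);
		ret = parse_events(evlist, str, &err);
		parse_events_error__print(&err, str);
		parse_events_error__exit(&err);
		return ret;
	}

In the event strings, the 'P' modifier requests the maximum precise level
(what evsel->precise_max = true used to express) and 'u' restricts the event
to user space, so "cycles:Pu" is the fallback when perf_event_paranoid_check(1)
says the kernel may not be profiled.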
This commit is contained in:

parent b4388dfa3a
commit 7b100989b4

10 changed files with 25 additions and 130 deletions

--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -16,26 +16,6 @@ void arch_evsel__set_sample_weight(struct evsel *evsel)
 	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
 }
 
-void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
-{
-	struct perf_env env = { .total_mem = 0, } ;
-
-	if (!perf_env__cpuid(&env))
-		return;
-
-	/*
-	 * On AMD, precise cycles event sampling internally uses IBS pmu.
-	 * But IBS does not have filtering capabilities and perf by default
-	 * sets exclude_guest = 1. This makes IBS pmu event init fail and
-	 * thus perf ends up doing non-precise sampling. Avoid it by clearing
-	 * exclude_guest.
-	 */
-	if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
-		attr->exclude_guest = 0;
-
-	free(env.cpuid);
-}
-
 /* Check whether the evsel's PMU supports the perf metrics */
 bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
 {

--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -4161,18 +4161,11 @@ int cmd_record(int argc, const char **argv)
 		record.opts.tail_synthesize = true;
 
 	if (rec->evlist->core.nr_entries == 0) {
-		if (perf_pmu__has_hybrid()) {
-			err = evlist__add_default_hybrid(rec->evlist,
-							 !record.opts.no_samples);
-		} else {
-			err = __evlist__add_default(rec->evlist,
-						    !record.opts.no_samples);
-		}
+		bool can_profile_kernel = perf_event_paranoid_check(1);
 
-		if (err < 0) {
-			pr_err("Not enough memory for event selector list\n");
+		err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+		if (err)
 			goto out;
-		}
 	}
 
 	if (rec->opts.target.tid && !rec->opts.no_inherit_set)

--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1653,10 +1653,12 @@ int cmd_top(int argc, const char **argv)
 	if (annotate_check_args(&top.annotation_opts) < 0)
 		goto out_delete_evlist;
 
-	if (!top.evlist->core.nr_entries &&
-	    evlist__add_default(top.evlist) < 0) {
-		pr_err("Not enough memory for event selector list\n");
-		goto out_delete_evlist;
+	if (!top.evlist->core.nr_entries) {
+		bool can_profile_kernel = perf_event_paranoid_check(1);
+		int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+
+		if (err)
+			goto out_delete_evlist;
 	}
 
 	status = evswitch__init(&top.evswitch, top.evlist, stderr);

--- a/tools/perf/util/evlist-hybrid.c
+++ b/tools/perf/util/evlist-hybrid.c
@@ -16,31 +16,6 @@
 #include <perf/evsel.h>
 #include <perf/cpumap.h>
 
-int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
-{
-	struct evsel *evsel;
-	struct perf_pmu *pmu;
-	__u64 config;
-	struct perf_cpu_map *cpus;
-
-	perf_pmu__for_each_hybrid_pmu(pmu) {
-		config = PERF_COUNT_HW_CPU_CYCLES |
-			 ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
-		evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
-					  config);
-		if (!evsel)
-			return -ENOMEM;
-
-		cpus = perf_cpu_map__get(pmu->cpus);
-		evsel->core.cpus = cpus;
-		evsel->core.own_cpus = perf_cpu_map__get(cpus);
-		evsel->pmu_name = strdup(pmu->name);
-		evlist__add(evlist, evsel);
-	}
-
-	return 0;
-}
-
 bool evlist__has_hybrid(struct evlist *evlist)
 {
 	struct evsel *evsel;
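
For reference, the removed loop above targeted each hybrid PMU by encoding
the PMU type into attr.config: the low 32 bits hold the generic hardware
event and the high 32 bits hold the PMU type (PERF_PMU_TYPE_SHIFT is 32 in
the perf tools). A worked example with hypothetical PMU type numbers, as
they would be read from /sys/bus/event_source/devices/<pmu>/type:

	/* Hypothetical sysfs type values, for illustration only. */
	__u64 cpu_core_type = 8, cpu_atom_type = 10;

	/* PERF_COUNT_HW_CPU_CYCLES is 0, so only the PMU type bits are set. */
	__u64 core_config = PERF_COUNT_HW_CPU_CYCLES |
			    (cpu_core_type << PERF_PMU_TYPE_SHIFT); /* 0x800000000 */
	__u64 atom_config = PERF_COUNT_HW_CPU_CYCLES |
			    (cpu_atom_type << PERF_PMU_TYPE_SHIFT); /* 0xa00000000 */

Parsing 'cycles:P' now yields one such evsel per matching PMU without this
hand-rolled construction.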

--- a/tools/perf/util/evlist-hybrid.h
+++ b/tools/perf/util/evlist-hybrid.h
@@ -7,7 +7,6 @@
 #include "evlist.h"
 #include <unistd.h>
 
-int evlist__add_default_hybrid(struct evlist *evlist, bool precise);
 bool evlist__has_hybrid(struct evlist *evlist);
 
 #endif /* __PERF_EVLIST_HYBRID_H */

--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -93,8 +93,15 @@ struct evlist *evlist__new(void)
 struct evlist *evlist__new_default(void)
 {
 	struct evlist *evlist = evlist__new();
+	bool can_profile_kernel;
+	int err;
 
-	if (evlist && evlist__add_default(evlist)) {
+	if (!evlist)
+		return NULL;
+
+	can_profile_kernel = perf_event_paranoid_check(1);
+	err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+	if (err) {
 		evlist__delete(evlist);
 		evlist = NULL;
 	}
@@ -237,19 +244,6 @@ static void evlist__set_leader(struct evlist *evlist)
 	perf_evlist__set_leader(&evlist->core);
 }
 
-int __evlist__add_default(struct evlist *evlist, bool precise)
-{
-	struct evsel *evsel;
-
-	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
-				  PERF_COUNT_HW_CPU_CYCLES);
-	if (evsel == NULL)
-		return -ENOMEM;
-
-	evlist__add(evlist, evsel);
-	return 0;
-}
-
 static struct evsel *evlist__dummy_event(struct evlist *evlist)
 {
 	struct perf_event_attr attr = {

--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -100,13 +100,6 @@ void evlist__delete(struct evlist *evlist);
 void evlist__add(struct evlist *evlist, struct evsel *entry);
 void evlist__remove(struct evlist *evlist, struct evsel *evsel);
 
-int __evlist__add_default(struct evlist *evlist, bool precise);
-
-static inline int evlist__add_default(struct evlist *evlist)
-{
-	return __evlist__add_default(evlist, true);
-}
-
 int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs);
 
 int __evlist__add_default_attrs(struct evlist *evlist,

--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -316,48 +316,6 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
 	return evsel;
 }
 
-static bool perf_event_can_profile_kernel(void)
-{
-	return perf_event_paranoid_check(1);
-}
-
-struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config)
-{
-	struct perf_event_attr attr = {
-		.type	= type,
-		.config	= config,
-		.exclude_kernel	= !perf_event_can_profile_kernel(),
-	};
-	struct evsel *evsel;
-
-	event_attr_init(&attr);
-
-	/*
-	 * Now let the usual logic to set up the perf_event_attr defaults
-	 * to kick in when we return and before perf_evsel__open() is called.
-	 */
-	evsel = evsel__new(&attr);
-	if (evsel == NULL)
-		goto out;
-
-	arch_evsel__fixup_new_cycles(&evsel->core.attr);
-
-	evsel->precise_max = true;
-
-	/* use asprintf() because free(evsel) assumes name is allocated */
-	if (asprintf(&evsel->name, "cycles%s%s%.*s",
-		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
-		     attr.exclude_kernel ? "u" : "",
-		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
-		goto error_free;
-out:
-	return evsel;
-error_free:
-	evsel__delete(evsel);
-	evsel = NULL;
-	goto out;
-}
-
 int copy_config_terms(struct list_head *dst, struct list_head *src)
 {
 	struct evsel_config_term *pos, *tmp;
@@ -1131,10 +1089,6 @@ void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
 	evsel__set_sample_bit(evsel, WEIGHT);
 }
 
-void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused)
-{
-}
-
 void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
 				    struct perf_event_attr *attr __maybe_unused)
 {
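
The asprintf() format in the removed evsel__new_cycles() above built event
names in the same modifier syntax ('u' and 'p' suffixes) that the event
parser understands. A minimal standalone demonstration of that format
string (plain C, not perf code):

	#include <stdio.h>

	/* Reproduces the removed "cycles%s%s%.*s" name building:
	 * the %.*s precision clamps how much of "ppp" is printed. */
	static void show(int precise_ip, int exclude_kernel)
	{
		printf("cycles%s%s%.*s\n",
		       (precise_ip || exclude_kernel) ? ":" : "",
		       exclude_kernel ? "u" : "",
		       precise_ip ? precise_ip + 1 : 0, "ppp");
	}

	int main(void)
	{
		show(0, 0);	/* cycles    */
		show(0, 1);	/* cycles:u  */
		show(1, 0);	/* cycles:pp */
		return 0;
	}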

--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -243,8 +243,6 @@ static inline struct evsel *evsel__newtp(const char *sys, const char *name)
 }
 #endif
 
-struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);
-
 #ifdef HAVE_LIBTRACEEVENT
 struct tep_event *event_format__new(const char *sys, const char *name);
 #endif
@@ -312,7 +310,6 @@ void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_forma
 void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
 
 void arch_evsel__set_sample_weight(struct evsel *evsel);
-void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
 void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
 
 int evsel__set_filter(struct evsel *evsel, const char *filter);

--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -49,6 +49,14 @@
 #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
 #endif
 
+/*
+ * Avoid bringing in event parsing.
+ */
+int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
+{
+	return 0;
+}
+
 /*
  * Provide these two so that we don't have to link against callchain.c and
  * start dragging hist.c, etc.