	perf stat: No need to setup affinities when starting a workload
I.e. the simple:

  $ perf stat sleep 1

Uses a dummy CPU map and thus there is no need to setup/cleanup
affinities to avoid IPIs, etc.

With this we're down to a sched_getaffinity() call, in the libnuma
initialization, that probably can be removed in a followup patch.

Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220117160931.1191712-3-acme@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
  parent 1855b796f2
  commit 49de179577

1 changed file with 10 additions and 7 deletions
tools/perf/builtin-stat.c

@@ -788,7 +788,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	const bool forks = (argc > 0);
 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	int err;
 	bool second_pass = false;

@@ -803,8 +803,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);

-	if (affinity__setup(&affinity) < 0)
-		return -1;
+	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return -1;
+		affinity = &saved_affinity;
+	}

 	evlist__for_each_entry(evsel_list, counter) {
 		if (bpf_counter__load(counter, &target))
@@ -813,7 +816,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			all_counters_use_bpf = false;
 	}

-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;

 		/*
@@ -869,7 +872,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		 */

 		/* First close errored or weak retry */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;

 			if (!counter->reset_group && !counter->errored)
@@ -878,7 +881,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
 		}
 		/* Now reopen weak */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;

 			if (!counter->reset_group && !counter->errored)
@@ -904,7 +907,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			counter->supported = true;
 		}
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);

 	evlist__for_each_entry(evsel_list, counter) {
 		if (!counter->supported) {
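For illustration, the control flow the patch ends up with can be sketched in standalone C: a NULL affinity pointer stands for "dummy CPU map, nothing to save or restore", and the cleanup is handed the possibly-NULL pointer directly. All helpers in this sketch (cpu_map_is_dummy(), affinity_setup(), affinity_cleanup(), run_counters()) are hypothetical stand-ins, not the real tools/perf APIs.

/*
 * Minimal standalone sketch of the pattern adopted by this patch.
 * The helpers are hypothetical stand-ins for the perf-internal
 * cpu_map__is_dummy()/affinity__setup()/affinity__cleanup() APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct affinity {
	int saved_cpu;			/* placeholder for the saved CPU mask */
};

/* Stand-in for cpu_map__is_dummy(): true for e.g. "perf stat sleep 1". */
static bool cpu_map_is_dummy(bool workload_only)
{
	return workload_only;
}

/* Stand-in for affinity__setup(): pretend to save the current mask. */
static int affinity_setup(struct affinity *a)
{
	a->saved_cpu = 0;
	printf("saved CPU affinity\n");
	return 0;
}

/* Stand-in for affinity__cleanup(): must tolerate a NULL argument. */
static void affinity_cleanup(struct affinity *a)
{
	if (a)
		printf("restored CPU affinity\n");
}

/* Stand-in for the evlist__for_each_cpu() open/close loops. */
static void run_counters(struct affinity *a)
{
	if (a)
		printf("migrating to each CPU before opening events\n");
	else
		printf("dummy CPU map: opening events without migrating\n");
}

int main(void)
{
	struct affinity saved_affinity, *affinity = NULL;

	/* Only pay for affinity handling when there is a real CPU map. */
	if (!cpu_map_is_dummy(/* workload only? */ true)) {
		if (affinity_setup(&saved_affinity) < 0)
			return 1;
		affinity = &saved_affinity;
	}

	run_counters(affinity);
	affinity_cleanup(affinity);	/* safe: NULL means nothing to restore */
	return 0;
}

Passing affinity rather than &affinity to evlist__for_each_cpu() and affinity__cleanup() works because, as the diff shows, both now take a pointer that may be NULL; when it is NULL, the per-CPU migration and the mask restore are simply skipped.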