	perf stat: No need to setup affinities when starting a workload
I.e. the simple:

  $ perf stat sleep 1

Uses a dummy CPU map and thus there is no need to setup/cleanup
affinities to avoid IPIs, etc.

With this we're down to a sched_getaffinity() call, in the libnuma
initialization, that probably can be removed in a followup patch.

Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220117160931.1191712-3-acme@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 1855b796f2
commit 49de179577

1 changed file with 10 additions and 7 deletions
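The patch gates the new behavior on whether the evlist's CPU map is a "dummy" map, i.e. the map a workload-only session gets when no CPUs were requested with -a or -C. The stand-alone sketch below illustrates that idea only; struct cpu_map and cpu_map_is_dummy() here are simplified stand-ins, not the real perf/libperf types or API.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for perf's CPU map -- just enough to show what a
 * "dummy" map is: a single entry whose CPU number is -1, i.e. no real
 * CPU to pin anything to. */
struct cpu_map {
	int nr;		/* number of entries */
	int map[2];	/* CPU numbers; -1 means "none" */
};

static bool cpu_map_is_dummy(const struct cpu_map *cpus)
{
	return cpus->nr == 1 && cpus->map[0] == -1;
}

int main(void)
{
	/* What a workload-only session (no -a, no -C) ends up with. */
	struct cpu_map workload_only = { .nr = 1, .map = { -1 } };
	/* Roughly what 'perf stat -C 0,1 ...' would produce instead. */
	struct cpu_map explicit_cpus = { .nr = 2, .map = { 0, 1 } };

	printf("workload-only map dummy? %s\n",
	       cpu_map_is_dummy(&workload_only) ? "yes" : "no");
	printf("explicit-CPU map dummy?  %s\n",
	       cpu_map_is_dummy(&explicit_cpus) ? "yes" : "no");
	return 0;
}

Run, it reports the workload-only map as dummy and the explicit two-CPU map as not, which is the distinction the diff below uses to decide whether affinity setup is worth doing at all.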
				
			
tools/perf/builtin-stat.c

@@ -788,7 +788,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	const bool forks = (argc > 0);
 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	int err;
 	bool second_pass = false;
 
@@ -803,8 +803,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);
 
-	if (affinity__setup(&affinity) < 0)
-		return -1;
+	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return -1;
+		affinity = &saved_affinity;
+	}
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (bpf_counter__load(counter, &target))
@@ -813,7 +816,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			all_counters_use_bpf = false;
 	}
 
-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;
 
 		/*
@@ -869,7 +872,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		 */
 
 		/* First close errored or weak retry */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;
 
 			if (!counter->reset_group && !counter->errored)
@@ -878,7 +881,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
 		}
 		/* Now reopen weak */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;
 
 			if (!counter->reset_group && !counter->errored)
@@ -904,7 +907,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			counter->supported = true;
 		}
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (!counter->supported) {
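The shape of the change is worth spelling out: struct affinity moves from a stack value to a possibly-NULL pointer, every evlist__for_each_cpu() call site takes the pointer form, and the final affinity__cleanup(affinity) runs unconditionally, so cleanup must treat NULL as a no-op. Below is a minimal self-contained sketch of that optional-resource pattern; affinity_state and visit_events() are hypothetical names standing in for the real perf helpers, not perf's actual API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for perf's struct affinity. */
struct affinity_state {
	unsigned char *saved_mask;	/* saved CPU mask to restore later */
};

static int affinity_state__setup(struct affinity_state *a)
{
	a->saved_mask = calloc(1, 128);
	return a->saved_mask ? 0 : -1;
}

/* NULL-tolerant cleanup: the unconditional cleanup call at the end of
 * the caller only works because passing NULL is a no-op. */
static void affinity_state__cleanup(struct affinity_state *a)
{
	if (a == NULL)
		return;
	free(a->saved_mask);
	a->saved_mask = NULL;
}

/* Stand-in for the evlist__for_each_cpu() call sites: pin only when an
 * affinity object was actually set up. */
static void visit_events(struct affinity_state *a)
{
	printf("visiting events %s affinity pinning\n",
	       a ? "with" : "without");
}

int main(int argc, char **argv)
{
	/* Pretend any argument means "workload only", i.e. a dummy CPU map. */
	bool dummy_cpu_map = argc > 1;
	struct affinity_state saved_affinity, *affinity = NULL;

	if (!dummy_cpu_map) {
		if (affinity_state__setup(&saved_affinity) < 0)
			return EXIT_FAILURE;
		affinity = &saved_affinity;
	}

	visit_events(affinity);

	affinity_state__cleanup(affinity);	/* safe even when NULL */
	return EXIT_SUCCESS;
}

Run with no arguments it takes the setup path; with any argument it models the dummy-CPU-map case and skips setup and teardown entirely, mirroring how a workload-only perf stat run now avoids the affinity work altogether.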