Specify the threshold for dumping off-cpu samples with --off-cpu-thresh;
the unit is milliseconds. The default value is 500ms.
Example:
  perf record --off-cpu --off-cpu-thresh 824
The example above collects direct off-cpu samples where the off-cpu time
is longer than 824ms.
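A minimal sketch of the check this enables on the BPF side (the
timestamp variables and the dump helper are illustrative, not the
actual skeleton code; offcpu_thresh_ns is the threshold that userspace
sets from the milliseconds argument, converted to nanoseconds):

  /* at sched-in, once we know how long the task was off-cpu */
  __u64 delta = ts_sched_in - ts_sched_out;  /* off-cpu time in ns */

  if (delta >= offcpu_thresh_ns)     /* 824 * NSEC_PER_MSEC in the example */
          dump_direct_sample();      /* illustrative dump helper */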
Committer testing:
After commenting out the end-of-record off-cpu dump, to keep just the
samples that are added right after the task is scheduled back in, and
using a threshold of 1000ms, we see some periods (the 5th column, just
before "offcpu-time" in the 'perf script' output) that are over
1,000,000,000 nanoseconds:
  root@number:~# perf record --off-cpu --off-cpu-thresh 10000
  ^C[ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 3.902 MB perf.data (34335 samples) ]
  root@number:~# perf script
<SNIP>
  Isolated Web Co   59932 [028] 63839.594437: 1000049427 offcpu-time:
             7fe63c7976c2 __syscall_cancel_arch_end+0x0 (/usr/lib64/libc.so.6)
             7fe63c78c04c __futex_abstimed_wait_common+0x7c (/usr/lib64/libc.so.6)
             7fe63c78e928 pthread_cond_timedwait@@GLIBC_2.3.2+0x178 (/usr/lib64/libc.so.6)
             5599974a9fe7 mozilla::detail::ConditionVariableImpl::wait_for(mozilla::detail::MutexImpl&, mozilla::BaseTimeDuration<mozilla::TimeDurationValueCalculator> const&)+0xe7 (/usr/lib64/fir>
                100000000 [unknown] ([unknown])
          swapper       0 [025] 63839.594459:     195724    cycles:P:  ffffffffac328270 read_tsc+0x0 ([kernel.kallsyms])
  Isolated Web Co   59932 [010] 63839.594466: 1000055278 offcpu-time:
             7fe63c7976c2 __syscall_cancel_arch_end+0x0 (/usr/lib64/libc.so.6)
             7fe63c78ba24 __syscall_cancel+0x14 (/usr/lib64/libc.so.6)
             7fe63c804c4e __poll+0x1e (/usr/lib64/libc.so.6)
             7fe633b0d1b8 PollWrapper(_GPollFD*, unsigned int, int) [clone .lto_priv.0]+0xf8 (/usr/lib64/firefox/libxul.so)
                10000002c [unknown] ([unknown])
          swapper       0 [027] 63839.594475:     134433    cycles:P:  ffffffffad4c45d9 irqentry_enter+0x19 ([kernel.kallsyms])
          swapper       0 [028] 63839.594499:     215838    cycles:P:  ffffffffac39199a switch_mm_irqs_off+0x10a ([kernel.kallsyms])
  MediaPD~oder #1 1407676 [027] 63839.594514:     134433    cycles:P:      7f982ef5e69f dct_IV(int*, int, int*)+0x24f (/usr/lib64/libfdk-aac.so.2.0.0)
          swapper       0 [024] 63839.594524:     267411    cycles:P:  ffffffffad4c6ee6 poll_idle+0x56 ([kernel.kallsyms])
  MediaSu~sor #75 1093827 [026] 63839.594555:     332652    cycles:P:      55be753ad030 moz_xmalloc+0x200 (/usr/lib64/firefox/firefox)
          swapper       0 [027] 63839.594616:     160548    cycles:P:  ffffffffad144840 menu_select+0x570 ([kernel.kallsyms])
  Isolated Web Co   14019 [027] 63839.595120: 1000050178 offcpu-time:
             7fc9537cc6c2 __syscall_cancel_arch_end+0x0 (/usr/lib64/libc.so.6)
             7fc9537c104c __futex_abstimed_wait_common+0x7c (/usr/lib64/libc.so.6)
             7fc9537c3928 pthread_cond_timedwait@@GLIBC_2.3.2+0x178 (/usr/lib64/libc.so.6)
             7fc95372a3c8 pt_TimedWait+0xb8 (/usr/lib64/libnspr4.so)
             7fc95372a8d8 PR_WaitCondVar+0x68 (/usr/lib64/libnspr4.so)
             7fc94afb1f7c WatchdogMain(void*)+0xac (/usr/lib64/firefox/libxul.so)
             7fc947498660 [unknown] ([unknown])
             7fc9535fce88 [unknown] ([unknown])
             7fc94b620e60 WatchdogManager::~WatchdogManager()+0x0 (/usr/lib64/firefox/libxul.so)
          fff8548387f8b48 [unknown] ([unknown])
          swapper       0 [003] 63839.595712:     212948    cycles:P:  ffffffffacd5b865 acpi_os_read_port+0x55 ([kernel.kallsyms])
<SNIP>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Suggested-by: Ian Rogers <irogers@google.com>
Suggested-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Howard Chu <howardchu95@gmail.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Gautam Menghani <gautam@linux.ibm.com>
Tested-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241108204137.2444151-2-howardchu95@gmail.com
Link: https://lore.kernel.org/r/20250501022809.449767-10-howardchu95@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>
#include <internal/xyarray.h>
#include <linux/time64.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS  32
#define MAX_PROC  4096
/* we don't need actual timestamp, just want to put the samples at last */
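/* (~0ull << 32) is above any real timestamp; off_cpu_write() bumps it per sample to keep ordering */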
#define OFF_CPU_TIMESTAMP  (~0ull << 32)

static struct off_cpu_bpf *skel;

struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

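/*
 * Staging buffer for one raw sample: pid/tgid word, off-cpu time,
 * callchain nr, PERF_CONTEXT_USER, up to MAX_STACKS addresses and
 * the cgroup id, hence MAX_STACKS + 5 slots.
 */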
u64 off_cpu_raw[MAX_STACKS + 5];

static int off_cpu_config(struct evlist *evlist)
{
	char off_cpu_event[64];
	struct evsel *evsel;

	scnprintf(off_cpu_event, sizeof(off_cpu_event), "bpf-output/name=%s/", OFFCPU_EVENT);
	if (parse_event(evlist, off_cpu_event)) {
		pr_err("Failed to open off-cpu event\n");
		return -1;
	}

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_offcpu_event(evsel)) {
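			/*
			 * Open the event on every CPU: off_cpu_start() wires
			 * one FD per CPU into the offcpu_output map so the
			 * BPF program can dump samples into it directly.
			 */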
			evsel->core.system_wide = true;
			break;
		}
	}

	return 0;
}

static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;
	struct evsel *evsel;
	struct perf_cpu pcpu;
	int i;

	/* update task filter for the given workload */
	if (skel->rodata->has_task && skel->rodata->uses_tgid &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	/* update BPF perf_event map */
	evsel = evlist__find_evsel_by_str(evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return;
	}

	perf_cpu_map__for_each_cpu(pcpu, i, evsel->core.cpus) {
		int err;
		int cpu_nr = pcpu.cpu;

		err = bpf_map__update_elem(skel->maps.offcpu_output, &cpu_nr, sizeof(int),
					   xyarray__entry(evsel->core.fd, cpu_nr, 0),
					   sizeof(int), BPF_ANY);
		if (err) {
			pr_err("Failed to update perf event map for direct off-cpu dumping\n");
			return;
		}
	}

	skel->bss->enabled = 1;
}

static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* v5.18 kernel added prev_state arg, so it needs to check the signature */
static void check_sched_switch_args(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	if (!btf) {
		pr_debug("Missing btf, check if CONFIG_DEBUG_INFO_BTF is enabled\n");
		goto cleanup;
	}

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		goto cleanup;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		goto cleanup;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		goto cleanup;

	t3 = btf__type_by_id(btf, t2->type);
	/* btf_trace func proto has one more argument for the context */
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
cleanup:
	btf__free(btf);
}

int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
		skel->rodata->has_cpu = 1;
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}

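		/* size the filter map for at least MAX_PROC entries, however few pids were given */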
		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
		skel->rodata->uses_tgid = 1;
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
		skel->rodata->has_task = 1;
		skel->rodata->uses_tgid = 1;
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
		skel->rodata->has_cgroup = 1;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

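	/* tell the BPF program how long an off-cpu period must be to get dumped directly */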
	skel->bss->offcpu_thresh_ns = opts->off_cpu_thresh_ns;

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to attach off-cpu skeleton\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u32 raw_size;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("not supported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

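	/* each entry accumulated by the BPF program becomes one synthetic PERF_RECORD_SAMPLE */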
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */

		bpf_map_lookup_elem(fd, &key, &val);

		/* zero-fill some of the fields, will be overwritten by raw_data when parsing */
		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP)
			data.array[n++] = 0;  /* will be updated */
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_RAW) {
			/*
			 *  [ size ][ data ]
			 *  [     data     ]
			 *  [     data     ]
			 *  [     data     ]
			 *  [ data ][ empty]
			 */
			int len = 0, i = 0;
			void *raw_data = (void *)data.array + n * sizeof(u64);

			off_cpu_raw[i++] = (u64)key.pid << 32 | key.tgid;
			off_cpu_raw[i++] = val;

			/* off_cpu_raw[i] is callchain->nr (updated later) */
			off_cpu_raw[i + 1] = PERF_CONTEXT_USER;
			off_cpu_raw[i + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &off_cpu_raw[i + 2]);
			while (off_cpu_raw[i + 2 + len])
				len++;

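			/* nr counts the PERF_CONTEXT_USER marker plus the stack entries */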
			off_cpu_raw[i] = len + 1;
			i += len + 2;

			off_cpu_raw[i++] = key.cgroup_id;

			raw_size = i * sizeof(u64) + sizeof(u32); /* 4 bytes for alignment */
			memcpy(raw_data, &raw_size, sizeof(raw_size));
			memcpy(raw_data + sizeof(u32), off_cpu_raw, i * sizeof(u64));

			n += i + 1;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}