author    Arnaldo Carvalho de Melo <acme@redhat.com>    2016-04-11 18:49:11 (GMT)
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2016-04-12 01:18:19 (GMT)
commit    44621819ddc9d5d0bfd0b0616c6cf33c94189b67 (patch)
tree      ffd8713f48d2e835debbea10b2809adc8e395c67 /tools/perf/builtin-trace.c
parent    ea4539652eccc87b14fbcbc90467ebcb87f02ddb (diff)
download  linux-44621819ddc9d5d0bfd0b0616c6cf33c94189b67.tar.xz
perf trace: Exclude the kernel part of the callchain leading to a syscall
The kernel parts are not that useful:

  # trace -m 512 -e nanosleep --call dwarf usleep 1
     0.065 ( 0.065 ms): usleep/18732 nanosleep(rqtp: 0x7ffc4ee4e200) = 0
                                       syscall_slow_exit_work ([kernel.kallsyms])
                                       do_syscall_64 ([kernel.kallsyms])
                                       return_from_SYSCALL_64 ([kernel.kallsyms])
                                       __nanosleep (/usr/lib64/libc-2.22.so)
                                       usleep (/usr/lib64/libc-2.22.so)
                                       main (/usr/bin/usleep)
                                       __libc_start_main (/usr/lib64/libc-2.22.so)
                                       _start (/usr/bin/usleep)
  #

So let's just use perf_event_attr.exclude_callchain_kernel to avoid
collecting it in the ring buffer:

  # trace -m 512 -e nanosleep --call dwarf usleep 1
     0.063 ( 0.063 ms): usleep/19212 nanosleep(rqtp: 0x7ffc3df10fb0) = 0
                                       __nanosleep (/usr/lib64/libc-2.22.so)
                                       usleep (/usr/lib64/libc-2.22.so)
                                       main (/usr/bin/usleep)
                                       __libc_start_main (/usr/lib64/libc-2.22.so)
                                       _start (/usr/bin/usleep)
  #

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-qctu3gqhpim0dfbcp9d86c91@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
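For context, a minimal sketch in C of the mechanism the patch relies on: setting perf_event_attr.exclude_callchain_kernel before perf_event_open() asks the kernel to drop the kernel-side frames from PERF_SAMPLE_CALLCHAIN data before they reach the ring buffer. Only exclude_callchain_kernel and PERF_SAMPLE_CALLCHAIN come from this commit; the helper name and the dummy software event are illustrative assumptions, not code from the patch.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Open a sampling event whose callchains contain only user space frames.
 * The event type/config are illustrative; the point is the attribute bit.
 */
static int open_user_only_callchain_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size		      = sizeof(attr);
	attr.type		      = PERF_TYPE_SOFTWARE;    /* illustrative */
	attr.config		      = PERF_COUNT_SW_DUMMY;   /* illustrative */
	attr.sample_period	      = 1;
	attr.sample_type	      = PERF_SAMPLE_CALLCHAIN;
	attr.exclude_callchain_kernel = 1;  /* keep only the user space frames */

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}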
Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--  tools/perf/builtin-trace.c | 13
1 file changed, 13 insertions, 0 deletions
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 63a3cc9..cfa5ce8 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -159,6 +159,7 @@ struct trace {
bool show_comm;
bool show_tool_stats;
bool trace_syscalls;
+ bool kernel_syscallchains;
bool force;
bool vfs_getname;
int trace_pgfaults;
@@ -2661,6 +2662,15 @@ static int trace__add_syscall_newtp(struct trace *trace)
perf_evlist__add(evlist, sys_enter);
perf_evlist__add(evlist, sys_exit);
+ if (trace->opts.callgraph_set && !trace->kernel_syscallchains) {
+ /*
+ * We're interested only in the user space callchain
+ * leading to the syscall, allow overriding that for
+ * debugging reasons using --kernel-syscall-graph
+ */
+ sys_exit->attr.exclude_callchain_kernel = 1;
+ }
+
trace->syscalls.events.sys_enter = sys_enter;
trace->syscalls.events.sys_exit = sys_exit;
@@ -3221,6 +3231,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
.output = stderr,
.show_comm = true,
.trace_syscalls = true,
+ .kernel_syscallchains = false,
};
const char *output_name = NULL;
const char *ev_qualifier_str = NULL;
@@ -3269,6 +3280,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK(0, "call-graph", &trace.opts,
"record_mode[,record_size]", record_callchain_help,
&record_parse_callchain_opt),
+ OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
+ "Show the kernel callchains on the syscall exit path"),
OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_END()
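With this change the kernel frames shown in the first example above are excluded by default, and the new boolean restores them. A plausible invocation, assuming only the long option names added above (this exact command line is not part of the commit):

  # trace -e nanosleep --call-graph dwarf --kernel-syscall-graph usleep 1

This would collect DWARF callchains on the raw_syscalls:sys_exit event and keep the [kernel.kallsyms] entries in the printed stack.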