From 90525176d71995ffde2d0c532f2758304c666a08 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Tue, 31 May 2016 12:47:46 -0300
Subject: perf evsel: Provide way to extract integer value from format_field

Factor this out of perf_evsel__intval(), which requires passing the
variable name that will then be searched in the list of tracepoint
variables for the given evsel. In cases such as syscall file descriptor
("fd") tracking this is wasteful; we just need to call
perf_evsel__field() once and cache the format_field.

Cc: Adrian Hunter
Cc: David Ahern
Cc: Jiri Olsa
Cc: Milian Wolff
Cc: Namhyung Kim
Cc: Wang Nan
Link: http://lkml.kernel.org/n/tip-r6f89jx9j5nkx037d0naviqy@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 8d30cbd..f4f01b2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2251,17 +2251,11 @@ void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
 	return sample->raw_data + offset;
 }
 
-u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
-		       const char *name)
+u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
+			 bool needs_swap)
 {
-	struct format_field *field = perf_evsel__field(evsel, name);
-	void *ptr;
 	u64 value;
-
-	if (!field)
-		return 0;
-
-	ptr = sample->raw_data + field->offset;
+	void *ptr = sample->raw_data + field->offset;
 
 	switch (field->size) {
 	case 1:
@@ -2279,7 +2273,7 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
 		return 0;
 	}
 
-	if (!evsel->needs_swap)
+	if (!needs_swap)
 		return value;
 
 	switch (field->size) {
@@ -2296,6 +2290,17 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
 	return 0;
 }
 
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+		       const char *name)
+{
+	struct format_field *field = perf_evsel__field(evsel, name);
+
+	if (!field)
+		return 0;
+
+	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
+}
+
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 			  char *msg, size_t msgsize)
 {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 028412b..828ddd1 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -261,6 +261,8 @@ static inline char *perf_evsel__strval(struct perf_evsel *evsel,
 
 struct format_field;
 
+u64 format_field__intval(struct format_field *field, struct perf_sample *sample, bool needs_swap);
+
 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
 
 #define perf_evsel__match(evsel, t, c)			\
--
cgit v0.10.2
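Usage note: a consumer tracking syscall file descriptors can now resolve the
field once and reuse it for every sample, instead of doing a per-sample name
search. A minimal sketch, not code from the tree (the fd_field/trace__* names
are hypothetical):

    /* Resolve the "fd" format_field once, at evsel setup time. */
    static struct format_field *fd_field;

    static int trace__cache_fd_field(struct perf_evsel *evsel)
    {
    	fd_field = perf_evsel__field(evsel, "fd");
    	return fd_field ? 0 : -1;
    }

    /* Hot path: no string comparison, just the cached field. */
    static u64 trace__sample_fd(struct perf_evsel *evsel, struct perf_sample *sample)
    {
    	return format_field__intval(fd_field, sample, evsel->needs_swap);
    }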
From 946ae1d41d4b0c77b9f63b4a0393d8a1283a7f9d Mon Sep 17 00:00:00 2001
From: Wang Nan
Date: Tue, 31 May 2016 13:06:15 +0000
Subject: perf evlist: Fix alloc_mmap() failure path

If zalloc() fails, setting evlist->mmap[i].fd is unsafe, so
perf_evlist__alloc_mmap() should bail out right after that.

Signed-off-by: Wang Nan
Acked-by: Masami Hiramatsu
Cc: He Kuang
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Zefan Li
Cc: pi3orama@163.com
Fixes: d4c6fb36ac2c ("perf evsel: Record fd into perf_mmap")
Link: http://lkml.kernel.org/r/1464699975-230440-1-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e0f3094..1b918aa 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -946,9 +946,12 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 	if (cpu_map__empty(evlist->cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	if (!evlist->mmap)
+		return -ENOMEM;
+
 	for (i = 0; i < evlist->nr_mmaps; i++)
 		evlist->mmap[i].fd = -1;
-	return evlist->mmap != NULL ? 0 : -ENOMEM;
+	return 0;
 }
 
 struct mmap_params {
--
cgit v0.10.2

From 703e01652d25edbd249e3043c26543157f0ef15c Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Thu, 17 Mar 2016 18:27:50 +0100
Subject: tools lib api: Respect CROSS_COMPILE for the linker

This fixes cross compilation of libapi.

Signed-off-by: Lucas Stach
Cc: Jiri Olsa
Cc: kernel@pengutronix.de
Cc: patchwork-lst@pengutronix.de
Link: http://lkml.kernel.org/r/1458235670-27341-1-git-send-email-l.stach@pengutronix.de
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 316f308..67ff93e 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -10,6 +10,7 @@ endif
 
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
+LD = $(CROSS_COMPILE)ld
 
 MAKEFLAGS += --no-print-directory
--
cgit v0.10.2
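Usage note: with LD now derived from CROSS_COMPILE as well, a cross build of
libapi needs nothing beyond the usual variable; for example, assuming an ARM
cross toolchain is installed (the prefix is illustrative):

    $ make -C tools/lib/api CROSS_COMPILE=arm-linux-gnueabihf-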
From 40f20e5074b035c7111e135aa939d1d1a96a2480 Mon Sep 17 00:00:00 2001
From: He Kuang
Date: Mon, 16 May 2016 04:51:19 +0000
Subject: perf script: Show call graphs when 1st event doesn't have it but
 some other has

There's a display inconsistency when there are multiple tracepoint
events, some of which have the 'call-graph' config option set while the
first one doesn't, i.e. the whole logic for call graph processing is
enabled only if the first tracepoint event has call-graph set.

For instance, if we record signal_deliver with call-graph and
signal_generate without:

  $ perf record -g -a -e signal:signal_deliver -e signal:signal_generate/call-graph=no/
  [ perf record: Captured and wrote 0.017 MB perf.data (2 samples) ]

  $ perf script
  kworker/u2:1 13 [000] 6563.875949: signal:signal_generate: sig=2 errno=0 code=128 comm=perf pid=1313 grp=1 res=0
          ff61cc __send_signal+0x3ec ([kernel.kallsyms])
  perf 1313 [000] 6563.877584: signal:signal_deliver: sig=2 errno=0 code=128 sa_handler=43115e sa_flags=14000000
          7ffff314 get_signal+0x80007f0023a4 ([kernel.kallsyms])
          7fffe358 do_signal+0x80007f002028 ([kernel.kallsyms])
          7fffa5e8 exit_to_usermode_loop+0x80007f002053 ([kernel.kallsyms])
          ...

Then we exchange the order of these two events on the command line,
keeping signal_generate without call-graph:

  $ perf record -g -a -e signal:signal_generate/call-graph=no/ -e signal:signal_deliver
  [ perf record: Captured and wrote 0.017 MB perf.data (2 samples) ]

  $ perf script
  kworker/u2:2 1314 [000] 6933.353060: signal:signal_generate: sig=2 errno=0 code=128 comm=perf pid=1321 grp=1 res=0
  perf 1321 [000] 6933.353872: signal:signal_deliver: sig=2 errno=0 code=128 sa_handler=43115e sa_flags=14000000

This time, the callchain of the event signal_deliver disappeared. The
problem is that perf only checks the first evsel in the evlist to
decide whether callchains should be printed.

This patch traverses all evsels in the evlist to see if any of them
have callchains, and shows the right result:

  $ perf script
  kworker/u2:2 1314 [000] 6933.353060: signal:signal_generate: sig=2 errno=0 code=128 comm=perf pid=1321 grp=1 res=0
          ff61cc __send_signal+0x3ec ([kernel.kallsyms])
  perf 1321 [000] 6933.353872: signal:signal_deliver: sig=2 errno=0 code=128 sa_handler=43115e sa_flags=14000000
          7ffff314 get_signal+0x80007f0023a4 ([kernel.kallsyms])
          7fffe358 do_signal+0x80007f002028 ([kernel.kallsyms])
          7fffa5e8 exit_to_usermode_loop+0x80007f002053 ([kernel.kallsyms])
          ...

Signed-off-by: He Kuang
Tested-by: Arnaldo Carvalho de Melo
Cc: Alexander Shishkin
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Wang Nan
Link: http://lkml.kernel.org/r/1463374279-97209-1-git-send-email-hekuang@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index e3ce2f3..4601123 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -339,7 +339,7 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
  */
 static int perf_session__check_output_opt(struct perf_session *session)
 {
-	int j;
+	unsigned int j;
 	struct perf_evsel *evsel;
 
 	for (j = 0; j < PERF_TYPE_MAX; ++j) {
@@ -388,17 +388,20 @@ static int perf_session__check_output_opt(struct perf_session *session)
 		struct perf_event_attr *attr;
 
 		j = PERF_TYPE_TRACEPOINT;
-		evsel = perf_session__find_first_evtype(session, j);
-		if (evsel == NULL)
-			goto out;
 
-		attr = &evsel->attr;
+		evlist__for_each(session->evlist, evsel) {
+			if (evsel->attr.type != j)
+				continue;
+
+			attr = &evsel->attr;
 
-		if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
-			output[j].fields |= PERF_OUTPUT_IP;
-			output[j].fields |= PERF_OUTPUT_SYM;
-			output[j].fields |= PERF_OUTPUT_DSO;
-			set_print_ip_opts(attr);
+			if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
+				output[j].fields |= PERF_OUTPUT_IP;
+				output[j].fields |= PERF_OUTPUT_SYM;
+				output[j].fields |= PERF_OUTPUT_DSO;
+				set_print_ip_opts(attr);
+				goto out;
+			}
 		}
 	}
--
cgit v0.10.2

From 17a2634bcb88e52bd637fdaa47d7ff0bddb0188f Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Mon, 6 Jun 2016 07:36:06 -0700
Subject: perf test: Ignore .scale and other special files

'perf test' tries to parse all entries in /sys/devices/cpu/events/.
Ignore the special entries like '.scale', which cannot be directly
parsed as an event. This patch assumes all files containing a '.' are
special and can be ignored.

Reported-by: Arnaldo Carvalho de Melo
Signed-off-by: Andi Kleen
Cc: Jiri Olsa
Link: http://lkml.kernel.org/r/1465223766-29902-1-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7865f68..b2a2c74 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1783,8 +1783,8 @@ static int test_pmu_events(void)
 		struct evlist_test e;
 		char name[MAX_NAME];
 
-		if (!strcmp(ent->d_name, ".") ||
-		    !strcmp(ent->d_name, ".."))
+		/* Names containing . are special and cannot be used directly */
+		if (strchr(ent->d_name, '.'))
 			continue;
 
 		snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
--
cgit v0.10.2
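For context, that sysfs directory mixes event files with their '.scale' and
'.unit' companions, so a plain readdir() walk hits non-event entries. The
exact listing varies by CPU model; an illustrative example:

    $ ls /sys/devices/cpu/events
    branch-instructions  cycles-ct  mem-loads        mem-stores
    branch-misses        cycles-t   mem-loads.scale  ...

With the strchr() filter above, anything with a '.' in its name (which also
covers the '.' and '..' directory entries) is skipped.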
From 44b1e60ab576c343aa592a2a6c679297cc69740d Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Mon, 30 May 2016 12:49:42 -0300
Subject: perf stat: Basic support for TopDown in perf stat

Add basic plumbing for TopDown in perf stat.

TopDown is intended to replace the frontend cycles idle/backend cycles
idle metrics in standard perf stat output. These metrics are not
reliable in many workloads, due to out of order effects.

This implements a new --topdown mode in perf stat (similar to
--transaction) that measures the pipeline bottlenecks using
standardized formulas. The measurement can all be done with 5 counters
(one of them a fixed counter).

The result is four metrics: FrontendBound, BackendBound,
BadSpeculation, Retiring, that describe the CPU pipeline behavior on a
high level.

The full top down methodology has many hierarchical metrics. This
implementation only supports level 1, which can be collected without
multiplexing. A full implementation of top down on top of perf is
available in pmu-tools toplev (http://github.com/andikleen/pmu-tools).

The current version works on Intel Core CPUs starting with Sandy
Bridge, and Atom CPUs starting with Silvermont. In principle the
generic metrics should also be implementable on other out of order
CPUs.

TopDown level 1 uses a set of abstracted metrics which are generic to
out of order CPU cores (although some CPUs may not implement all of
them):

  topdown-total-slots       Available slots in the pipeline
  topdown-slots-issued      Slots issued into the pipeline
  topdown-slots-retired     Slots successfully retired
  topdown-fetch-bubbles     Pipeline gaps in the frontend
  topdown-recovery-bubbles  Pipeline gaps during recovery from misspeculation

These metrics then allow computing four useful higher-level metrics:
FrontendBound, BackendBound, Retiring, BadSpeculation.

Add a new --topdown option to enable the events. When --topdown is
specified, set up events for all topdown events supported by the
kernel. Add topdown-* as a special case to the event parser, as is
needed for all events containing '-'.

The actual code to compute the metrics is in follow-on patches.

v2: Use standard sysctl read function.
v3: Move x86 specific code to arch/
v4: Enable --metric-only implicitly for topdown.
v5: Add --single-thread option to not force per core mode
v6: Fix output order of topdown metrics
v7: Allow combining with -d
v8: Remove --single-thread again
v9: Rename functions, adding arch_ and topdown_.
v10: Expand man page and describe TopDown better
     Paste intro into commit description.
     Print error when malloc fails.

Signed-off-by: Andi Kleen
Acked-by: Jiri Olsa
Link: http://lkml.kernel.org/r/1464119559-17203-1-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo
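Concretely, when the kernel advertises all five events in sysfs and grouping
is possible, topdown_filter_events() in the patch below builds an event
string of the form

    {topdown-total-slots,topdown-slots-retired,topdown-recovery-bubbles,topdown-fetch-bubbles,topdown-slots-issued}

and hands it to parse_events(); events missing from sysfs are filtered out
first, and the braces are omitted when the group check fails (e.g. with the
NMI watchdog enabled).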
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 04f23b4..d96ccd4 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -204,6 +204,38 @@ Aggregate counts per physical processor for system-wide mode measurements.
 --no-aggr::
 Do not aggregate counts across all monitored CPUs.
 
+--topdown::
+Print top down level 1 metrics if supported by the CPU. This allows
+determining bottlenecks in the CPU pipeline for CPU bound workloads,
+by breaking the cycles consumed down into frontend bound, backend bound,
+bad speculation and retiring.
+
+Frontend bound means that the CPU cannot fetch and decode instructions fast
+enough. Backend bound means that computation or memory access is the
+bottleneck. Bad Speculation means that the CPU wasted cycles due to branch
+mispredictions and similar issues. Retiring means that the CPU computed without
+an apparent bottleneck. The bottleneck is only the real bottleneck
+if the workload is actually bound by the CPU and not by something else.
+
+For best results it is usually a good idea to use it with interval
+mode like -I 1000, as the bottleneck of workloads can change often.
+
+The top down metrics are collected per core instead of per
+CPU thread. Per core mode is automatically enabled
+and -a (global monitoring) is needed, requiring root rights or
+perf.perf_event_paranoid=-1.
+
+Topdown uses the full Performance Monitoring Unit, and needs
+disabling of the NMI watchdog (as root):
+echo 0 > /proc/sys/kernel/nmi_watchdog
+for best results. Otherwise the bottlenecks may be inconsistent
+on workloads with changing phases.
+
+This enables --metric-only, unless overridden with --no-metric-only.
+
+To interpret the results it is usually needed to know on which
+CPUs the workload runs. If needed the CPUs can be forced using
+taskset.
 
 EXAMPLES
 --------
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 4659703..4cd8a16 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -3,6 +3,7 @@ libperf-y += tsc.o
 libperf-y += pmu.o
 libperf-y += kvm-stat.o
 libperf-y += perf_regs.o
+libperf-y += group.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/util/group.c b/tools/perf/arch/x86/util/group.c
new file mode 100644
index 0000000..37f92aa
--- /dev/null
+++ b/tools/perf/arch/x86/util/group.c
@@ -0,0 +1,27 @@
+#include <stdio.h>
+#include "api/fs/fs.h"
+#include "util/group.h"
+
+/*
+ * Check whether we can use a group for top down.
+ * Without a group may get bad results due to multiplexing.
+ */
+bool arch_topdown_check_group(bool *warn)
+{
+	int n;
+
+	if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
+		return false;
+	if (n > 0) {
+		*warn = true;
+		return false;
+	}
+	return true;
+}
+
+void arch_topdown_group_warn(void)
+{
+	fprintf(stderr,
+		"nmi_watchdog enabled with topdown. May give wrong results.\n"
+		"Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
+}
No raw values", enable_metric_only), + OPT_BOOLEAN(0, "topdown", &topdown_run, + "measure topdown level 1 statistics"), OPT_END() }; @@ -1772,12 +1796,62 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st) return 0; } +static int topdown_filter_events(const char **attr, char **str, bool use_group) +{ + int off = 0; + int i; + int len = 0; + char *s; + + for (i = 0; attr[i]; i++) { + if (pmu_have_event("cpu", attr[i])) { + len += strlen(attr[i]) + 1; + attr[i - off] = attr[i]; + } else + off++; + } + attr[i - off] = NULL; + + *str = malloc(len + 1 + 2); + if (!*str) + return -1; + s = *str; + if (i - off == 0) { + *s = 0; + return 0; + } + if (use_group) + *s++ = '{'; + for (i = 0; attr[i]; i++) { + strcpy(s, attr[i]); + s += strlen(s); + *s++ = ','; + } + if (use_group) { + s[-1] = '}'; + *s = 0; + } else + s[-1] = 0; + return 0; +} + +__weak bool arch_topdown_check_group(bool *warn) +{ + *warn = false; + return false; +} + +__weak void arch_topdown_group_warn(void) +{ +} + /* * Add default attributes, if there were no attributes specified or * if -d/--detailed, -d -d or -d -d -d is used: */ static int add_default_attributes(void) { + int err; struct perf_event_attr default_attrs0[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, @@ -1896,7 +1970,6 @@ static int add_default_attributes(void) return 0; if (transaction_run) { - int err; if (pmu_have_event("cpu", "cycles-ct") && pmu_have_event("cpu", "el-start")) err = parse_events(evsel_list, transaction_attrs, NULL); @@ -1909,6 +1982,46 @@ static int add_default_attributes(void) return 0; } + if (topdown_run) { + char *str = NULL; + bool warn = false; + + if (stat_config.aggr_mode != AGGR_GLOBAL && + stat_config.aggr_mode != AGGR_CORE) { + pr_err("top down event configuration requires --per-core mode\n"); + return -1; + } + stat_config.aggr_mode = AGGR_CORE; + if (nr_cgroups || !target__has_cpu(&target)) { + pr_err("top down event configuration requires system-wide mode (-a)\n"); + return -1; + } + + if (!force_metric_only) + metric_only = true; + if (topdown_filter_events(topdown_attrs, &str, + arch_topdown_check_group(&warn)) < 0) { + pr_err("Out of memory\n"); + return -1; + } + if (topdown_attrs[0] && str) { + if (warn) + arch_topdown_group_warn(); + err = parse_events(evsel_list, str, NULL); + if (err) { + fprintf(stderr, + "Cannot set up top down events %s: %d\n", + str, err); + free(str); + return -1; + } + } else { + fprintf(stderr, "System does not support topdown\n"); + return -1; + } + free(str); + } + if (!evsel_list->nr_entries) { if (target__has_cpu(&target)) default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; diff --git a/tools/perf/util/group.h b/tools/perf/util/group.h new file mode 100644 index 0000000..116debe --- /dev/null +++ b/tools/perf/util/group.h @@ -0,0 +1,7 @@ +#ifndef GROUP_H +#define GROUP_H 1 + +bool arch_topdown_check_group(bool *warn); +void arch_topdown_group_warn(void); + +#endif diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 01af1ee..3c15b33 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -260,6 +260,7 @@ cycles-ct { return str(yyscanner, PE_KERNEL_PMU_EVENT); } cycles-t { return str(yyscanner, PE_KERNEL_PMU_EVENT); } mem-loads { return str(yyscanner, PE_KERNEL_PMU_EVENT); } mem-stores { return str(yyscanner, PE_KERNEL_PMU_EVENT); } +topdown-[a-z-]+ { return str(yyscanner, PE_KERNEL_PMU_EVENT); } L1-dcache|l1-d|l1d|L1-data | L1-icache|l1-i|l1i|L1-instruction | -- cgit v0.10.2 From 
From 239bd47f8355eb5defc865cf408824b6cfeca5dc Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 24 May 2016 12:52:37 -0700
Subject: perf stat: Add computation of TopDown formulas

Implement the TopDown formulas in 'perf stat'. The topdown basic
metrics reported by the kernel are collected, and the formulas are
computed and output as normal metrics.

See the kernel commit exporting the events for details on the used
metrics.

Committer note:

Output example:

  # perf stat --topdown -a usleep 1

   Performance counter stats for 'system wide':

             retiring  bad speculation  frontend bound  backend bound
  S0-C0  2    23.8%    11.6%    28.3%    36.3%
  S0-C1  2    16.2%    15.7%    36.5%    31.6%

        0.000579956 seconds time elapsed

  #

v2: Always print all metrics, only use thresholds for coloring.
v3: Mark retiring over threshold green, not red.
v4: Only print one decimal digit
    Fix color printing of one metric
v5: Avoid printing -0.0
v6: Remove extra frontend event lookup

Signed-off-by: Andi Kleen
Acked-by: Jiri Olsa
Link: http://lkml.kernel.org/r/1464119559-17203-2-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo
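To make the formulas below concrete, a worked example with made-up counts
(purely illustrative numbers):

    TotalSlots = 1000, SlotsIssued = 600, SlotsRetired = 500,
    FetchBubbles = 200, RecoveryBubbles = 50

    BadSpeculation = ((600 - 500) + 50) / 1000 = 15.0%
    Retiring       = 500 / 1000                = 50.0%
    FrontendBound  = 200 / 1000                = 20.0%
    BackendBound   = 1 - 0.15 - 0.5 - 0.2      = 15.0%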
update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) @@ -302,6 +322,107 @@ static void print_ll_cache_misses(int cpu, out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio); } +/* + * High level "TopDown" CPU core pipe line bottleneck break down. + * + * Basic concept following + * Yasin, A Top Down Method for Performance analysis and Counter architecture + * ISPASS14 + * + * The CPU pipeline is divided into 4 areas that can be bottlenecks: + * + * Frontend -> Backend -> Retiring + * BadSpeculation in addition means out of order execution that is thrown away + * (for example branch mispredictions) + * Frontend is instruction decoding. + * Backend is execution, like computation and accessing data in memory + * Retiring is good execution that is not directly bottlenecked + * + * The formulas are computed in slots. + * A slot is an entry in the pipeline each for the pipeline width + * (for example a 4-wide pipeline has 4 slots for each cycle) + * + * Formulas: + * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) / + * TotalSlots + * Retiring = SlotsRetired / TotalSlots + * FrontendBound = FetchBubbles / TotalSlots + * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound + * + * The kernel provides the mapping to the low level CPU events and any scaling + * needed for the CPU pipeline width, for example: + * + * TotalSlots = Cycles * 4 + * + * The scaling factor is communicated in the sysfs unit. + * + * In some cases the CPU may not be able to measure all the formulas due to + * missing events. In this case multiple formulas are combined, as possible. + * + * Full TopDown supports more levels to sub-divide each area: for example + * BackendBound into computing bound and memory bound. For now we only + * support Level 1 TopDown. 
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index ffa1d06..c1ba255 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -79,6 +79,11 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
 	ID(TRANSACTION_START,	cpu/tx-start/),
 	ID(ELISION_START,	cpu/el-start/),
 	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
+	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
+	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
+	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
+	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
+	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
 };
 #undef ID
 
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 0150e78..c29bb94 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -17,6 +17,11 @@ enum perf_stat_evsel_id {
 	PERF_STAT_EVSEL_ID__TRANSACTION_START,
 	PERF_STAT_EVSEL_ID__ELISION_START,
 	PERF_STAT_EVSEL_ID__CYCLES_IN_TX_CP,
+	PERF_STAT_EVSEL_ID__TOPDOWN_TOTAL_SLOTS,
+	PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_ISSUED,
+	PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_RETIRED,
+	PERF_STAT_EVSEL_ID__TOPDOWN_FETCH_BUBBLES,
+	PERF_STAT_EVSEL_ID__TOPDOWN_RECOVERY_BUBBLES,
 	PERF_STAT_EVSEL_ID__MAX,
 };
--
cgit v0.10.2
From 41c8ca2a924b359e8f1768f8550487cd13a1ec03 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 24 May 2016 12:52:38 -0700
Subject: perf stat: Print topology/time headers with --metric-only

When --metric-only is enabled there were no headers printed for the
topology in interval mode. Also, when headers were printed they were on
a separate line.

Before:

  $ perf stat --metric-only -A -I 1000 -a
     1.001038376 frontend cycles idle insn per cycle stalled cycles per insn branch-misses of all branches
     1.001038376 CPU0 123.54% 0.23 5.29 7.61%
     1.001038376 CPU1 137.78% 0.24 5.13 10.07%
     1.001038376 CPU2 64.48% 0.22 5.50 6.84%

After:

  $ perf stat --metric-only -A -I 1000 -a
     1.001111114 CPU0 82.46% 0.32 2.60 7.64%
     1.001111114 CPU1 126.63% 0.02 42.83 0.15%
     1.001111114 CPU2 193.54% 0.32 2.59 6.92%

v2: Move all headers on a single line

Reported-by: Jiri Olsa
Signed-off-by: Andi Kleen
Acked-by: Jiri Olsa
Link: http://lkml.kernel.org/r/1464119559-17203-3-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index fd76bb0..a168e72 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1316,7 +1316,7 @@ static int aggr_header_lens[] = {
 	[AGGR_GLOBAL] = 0,
 };
 
-static void print_metric_headers(char *prefix)
+static void print_metric_headers(const char *prefix, bool no_indent)
 {
 	struct perf_stat_output_ctx out;
 	struct perf_evsel *counter;
@@ -1327,7 +1327,7 @@ static void print_metric_headers(char *prefix)
 	if (prefix)
 		fprintf(stat_config.output, "%s", prefix);
 
-	if (!csv_output)
+	if (!csv_output && !no_indent)
 		fprintf(stat_config.output, "%*s",
 			aggr_header_lens[stat_config.aggr_mode], "");
@@ -1352,28 +1352,40 @@ static void print_interval(char *prefix, struct timespec *ts)
 	sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
-	if (num_print_interval == 0 && !csv_output && !metric_only) {
+	if (num_print_interval == 0 && !csv_output) {
 		switch (stat_config.aggr_mode) {
 		case AGGR_SOCKET:
-			fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit");
+			fprintf(output, "# time socket cpus");
+			if (!metric_only)
+				fprintf(output, " counts %*s events\n", unit_width, "unit");
 			break;
 		case AGGR_CORE:
-			fprintf(output, "# time core cpus counts %*s events\n", unit_width, "unit");
+			fprintf(output, "# time core cpus");
+			if (!metric_only)
+				fprintf(output, " counts %*s events\n", unit_width, "unit");
 			break;
 		case AGGR_NONE:
-			fprintf(output, "# time CPU counts %*s events\n", unit_width, "unit");
+			fprintf(output, "# time CPU");
+			if (!metric_only)
+				fprintf(output, " counts %*s events\n", unit_width, "unit");
 			break;
 		case AGGR_THREAD:
-			fprintf(output, "# time comm-pid counts %*s events\n", unit_width, "unit");
+			fprintf(output, "# time comm-pid");
+			if (!metric_only)
+				fprintf(output, " counts %*s events\n", unit_width, "unit");
 			break;
 		case AGGR_GLOBAL:
 		default:
-			fprintf(output, "# time counts %*s events\n", unit_width, "unit");
+			fprintf(output, "# time");
+			if (!metric_only)
+				fprintf(output, " counts %*s events\n", unit_width, "unit");
 		case AGGR_UNSET:
 			break;
 		}
 	}
 
+	if (num_print_interval == 0 && metric_only)
+		print_metric_headers(" ", true);
 	if (++num_print_interval == 25)
 		num_print_interval = 0;
 }
@@ -1442,8 +1454,8 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
 	if (metric_only) {
 		static int num_print_iv;
 
-		if (num_print_iv == 0)
-			print_metric_headers(prefix);
+		if (num_print_iv == 0 && !interval)
+			print_metric_headers(prefix, false);
 		if (num_print_iv++ == 25)
 			num_print_iv = 0;
 		if (stat_config.aggr_mode == AGGR_GLOBAL && prefix)
--
cgit v0.10.2
"unit"); + fprintf(output, "# time CPU"); + if (!metric_only) + fprintf(output, " counts %*s events\n", unit_width, "unit"); break; case AGGR_THREAD: - fprintf(output, "# time comm-pid counts %*s events\n", unit_width, "unit"); + fprintf(output, "# time comm-pid"); + if (!metric_only) + fprintf(output, " counts %*s events\n", unit_width, "unit"); break; case AGGR_GLOBAL: default: - fprintf(output, "# time counts %*s events\n", unit_width, "unit"); + fprintf(output, "# time"); + if (!metric_only) + fprintf(output, " counts %*s events\n", unit_width, "unit"); case AGGR_UNSET: break; } } + if (num_print_interval == 0 && metric_only) + print_metric_headers(" ", true); if (++num_print_interval == 25) num_print_interval = 0; } @@ -1442,8 +1454,8 @@ static void print_counters(struct timespec *ts, int argc, const char **argv) if (metric_only) { static int num_print_iv; - if (num_print_iv == 0) - print_metric_headers(prefix); + if (num_print_iv == 0 && !interval) + print_metric_headers(prefix, false); if (num_print_iv++ == 25) num_print_iv = 0; if (stat_config.aggr_mode == AGGR_GLOBAL && prefix) -- cgit v0.10.2 From c51fd6395d67a6d414834db7f892c95594247d6f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 24 May 2016 12:52:39 -0700 Subject: perf stat: Add missing aggregation headers for --metric-only CSV When in CSV mode --metric-only outputs an header, unlike the other modes. Previously it did not properly print headers for the aggregation columns, so the headers were actually shifted against the real values. Fix this here by outputting the correct headers for CSV. v2: Indent array. Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lkml.kernel.org/r/1464119559-17203-4-git-send-email-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a168e72..dff6373 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1316,6 +1316,14 @@ static int aggr_header_lens[] = { [AGGR_GLOBAL] = 0, }; +static const char *aggr_header_csv[] = { + [AGGR_CORE] = "core,cpus,", + [AGGR_SOCKET] = "socket,cpus", + [AGGR_NONE] = "cpu,", + [AGGR_THREAD] = "comm-pid,", + [AGGR_GLOBAL] = "" +}; + static void print_metric_headers(const char *prefix, bool no_indent) { struct perf_stat_output_ctx out; @@ -1330,6 +1338,12 @@ static void print_metric_headers(const char *prefix, bool no_indent) if (!csv_output && !no_indent) fprintf(stat_config.output, "%*s", aggr_header_lens[stat_config.aggr_mode], ""); + if (csv_output) { + if (stat_config.interval) + fputs("time,", stat_config.output); + fputs(aggr_header_csv[stat_config.aggr_mode], + stat_config.output); + } /* Print metrics headers only */ evlist__for_each(evsel_list, counter) { -- cgit v0.10.2 From 78f71c996fb92d129ec75d572e2f5367a4f4c757 Mon Sep 17 00:00:00 2001 From: Taeung Song Date: Mon, 6 Jun 2016 19:52:52 +0900 Subject: perf config: Fix abnormal termination at perf_parse_file() If a config file has wrong key-value pairs, the perf process will be forcibly terminated by die() at perf_parse_file() called by perf_config() so terminal settings can be crushed because of unusual termination. For example: If user config file has a wrong value 'red;default' instead of a normal value like 'red, default' for a key 'colors.top', # cat ~/.perfconfig [colors] medium = red;default # wrong value and if running sub-command 'top', # perf top perf process is dead by force and terminal setting is broken with a messge like below. 
From 78f71c996fb92d129ec75d572e2f5367a4f4c757 Mon Sep 17 00:00:00 2001
From: Taeung Song
Date: Mon, 6 Jun 2016 19:52:52 +0900
Subject: perf config: Fix abnormal termination at perf_parse_file()

If a config file has wrong key-value pairs, the perf process will be
forcibly terminated by die() at perf_parse_file(), called by
perf_config(), and the terminal settings can end up broken because of
the abnormal termination.

For example: if the user config file has a wrong value 'red;default'
instead of a normal value like 'red, default' for the key 'colors.top':

  # cat ~/.perfconfig
  [colors]
	medium = red;default # wrong value

and we run the sub-command 'top':

  # perf top

the perf process is forcibly killed and the terminal settings are
broken, with a message like the one below:

  Fatal: bad config file line 2 in /root/.perfconfig

So fix it: make perf_parse_file() return on failure instead of calling
die(), so that perf_config() can handle the error. And if a config file
has wrong values, show the error message and then use the default
config values instead of the wrong config values.

Signed-off-by: Taeung Song
Tested-by: Arnaldo Carvalho de Melo
Cc: Alexander Shishkin
Cc: Jiri Olsa
Cc: Masami Hiramatsu
Cc: Namhyung Kim
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1465210380-26749-2-git-send-email-treeze.taeung@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index dad7d82..b500737 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -275,7 +275,8 @@ static int perf_parse_file(config_fn_t fn, void *data)
 			break;
 		}
 	}
-	die("bad config file line %d in %s", config_linenr, config_file_name);
+	pr_err("bad config file line %d in %s\n", config_linenr, config_file_name);
+	return -1;
 }
 
 static int parse_unit_factor(const char *end, unsigned long *val)
@@ -479,16 +480,15 @@ static int perf_config_global(void)
 
 int perf_config(config_fn_t fn, void *data)
 {
-	int ret = 0, found = 0;
+	int ret = -1;
 	const char *home = NULL;
 
 	/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
 	if (config_exclusive_filename)
 		return perf_config_from_file(fn, config_exclusive_filename, data);
 	if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
-		ret += perf_config_from_file(fn, perf_etc_perfconfig(),
-					     data);
-		found += 1;
+		if (perf_config_from_file(fn, perf_etc_perfconfig(), data) < 0)
+			goto out;
 	}
 
 	home = getenv("HOME");
@@ -514,14 +514,12 @@ int perf_config(config_fn_t fn, void *data)
 		if (!st.st_size)
 			goto out_free;
 
-		ret += perf_config_from_file(fn, user_config, data);
-		found += 1;
+		ret = perf_config_from_file(fn, user_config, data);
+
 out_free:
 		free(user_config);
 	}
 out:
-	if (found == 0)
-		return -1;
 	return ret;
 }
--
cgit v0.10.2

From 7db91f251056f90fec4121f028680ab3153a0f3c Mon Sep 17 00:00:00 2001
From: Taeung Song
Date: Mon, 6 Jun 2016 19:52:54 +0900
Subject: perf config: Handle the error when config set is NULL at collect_config()

collect_config() collects all the config key-value pairs from the
config files and puts each config info into the config set. But if the
config set (i.e. the 'set' variable in collect_config()) is NULL, that
is an error, so handle it.

Signed-off-by: Taeung Song
Cc: Alexander Shishkin
Cc: Jiri Olsa
Cc: Masami Hiramatsu
Cc: Namhyung Kim
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1465210380-26749-4-git-send-email-treeze.taeung@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index b500737..c73f1c4 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -607,8 +607,12 @@ static int collect_config(const char *var, const char *value,
 	struct perf_config_section *section = NULL;
 	struct perf_config_item *item = NULL;
 	struct perf_config_set *set = perf_config_set;
-	struct list_head *sections = &set->sections;
+	struct list_head *sections;
 
+	if (set == NULL)
+		return -1;
+
+	sections = &set->sections;
 	key = ptr = strdup(var);
 	if (!key) {
 		pr_debug("%s: strdup failed\n", __func__);
--
cgit v0.10.2
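With these two fixes in place, configuration errors propagate to the caller
instead of killing the process. A minimal sketch of the resulting caller
pattern, not code from the tree (my_config_cb is a hypothetical callback):

    /* Hypothetical callback: record interesting values, return 0 on success. */
    static int my_config_cb(const char *var, const char *value, void *data)
    {
    	(void)value; (void)data;
    	if (!strcmp(var, "colors.medium"))
    		; /* parse and stash the value here */
    	return 0;
    }

    /* ... at startup: on error, the built-in defaults stay in effect. */
    if (perf_config(my_config_cb, NULL) < 0)
    	pr_warning("bad config file, using defaults\n");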