From 9a0f05cb36888550d1509d60aa55788615abea44 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 21 Nov 2011 15:13:29 +0100 Subject: perf: Update the mmap control page on mmap() Apparently we didn't update the mmap control page right after mmap(), which leads to surprises when userspace wants to use it. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Arun Sharma Link: http://lkml.kernel.org/n/tip-dcpi7164djsexmx6ya7lilrc@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/kernel/events/core.c b/kernel/events/core.c index 2f8f3f1..0ca1f64 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3534,6 +3534,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) event->mmap_user = get_current_user(); vma->vm_mm->pinned_vm += event->mmap_locked; + perf_event_update_userpage(event); + unlock: if (!ret) atomic_inc(&event->mmap_count); -- cgit v0.10.2 From 35edc2a5095efb189e60dc32bbb9d2663aec6d24 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 20 Nov 2011 20:36:02 +0100 Subject: perf, arch: Rework perf_event_index() Put the logic to compute the event index into a per pmu method. This is required because the x86 rules are weird and wonderful and don't match the capabilities of the current scheme. AFAIK only powerpc actually has a usable userspace read of the PMCs but I'm not at all sure anybody actually used that. ARM is restored to the default since it currently does not support userspace access at all. And all software events are provided with a method that reports their index as 0 (disabled). Signed-off-by: Peter Zijlstra Cc: Michael Cree Cc: Will Deacon Cc: Deng-Cheng Zhu Cc: Anton Blanchard Cc: Eric B Munson Cc: Heiko Carstens Cc: Paul Mundt Cc: David S. Miller Cc: Richard Kuo Cc: Stephane Eranian Cc: Arun Sharma Link: http://lkml.kernel.org/n/tip-dfydxodki16lylkt3gl2j7cw@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 0f8e382..08f94d8 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,10 +12,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* ARM performance counters start from 1 (in the cp15 accesses) so use the - * same indexes here for consistency. */ -#define PERF_EVENT_INDEX_OFFSET 1 - /* ARM perf PMU IDs for use by internal perf clients. 
*/ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_XSCALE1 = 0, diff --git a/arch/frv/include/asm/perf_event.h b/arch/frv/include/asm/perf_event.h index a69e015..c52ea55 100644 --- a/arch/frv/include/asm/perf_event.h +++ b/arch/frv/include/asm/perf_event.h @@ -12,6 +12,4 @@ #ifndef _ASM_PERF_EVENT_H #define _ASM_PERF_EVENT_H -#define PERF_EVENT_INDEX_OFFSET 0 - #endif /* _ASM_PERF_EVENT_H */ diff --git a/arch/hexagon/include/asm/perf_event.h b/arch/hexagon/include/asm/perf_event.h index 6c2910f..8b8526b 100644 --- a/arch/hexagon/include/asm/perf_event.h +++ b/arch/hexagon/include/asm/perf_event.h @@ -19,6 +19,4 @@ #ifndef _ASM_PERF_EVENT_H #define _ASM_PERF_EVENT_H -#define PERF_EVENT_INDEX_OFFSET 0 - #endif /* _ASM_PERF_EVENT_H */ diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 8f1df12..1a8093f 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -61,8 +61,6 @@ struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -#define PERF_EVENT_INDEX_OFFSET 1 - /* * Only override the default definitions in include/linux/perf_event.h * if we have hardware PMU support. diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 10a140f..d614ab5 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1187,6 +1187,11 @@ static int power_pmu_event_init(struct perf_event *event) return err; } +static int power_pmu_event_idx(struct perf_event *event) +{ + return event->hw.idx; +} + struct pmu power_pmu = { .pmu_enable = power_pmu_enable, .pmu_disable = power_pmu_disable, @@ -1199,6 +1204,7 @@ struct pmu power_pmu = { .start_txn = power_pmu_start_txn, .cancel_txn = power_pmu_cancel_txn, .commit_txn = power_pmu_commit_txn, + .event_idx = power_pmu_event_idx, }; /* diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index a75f168..4eb444e 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -6,4 +6,3 @@ /* Empty, just to avoid compiling error */ -#define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 096c975..9b922c1 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -188,8 +188,6 @@ extern u32 get_ibs_caps(void); #ifdef CONFIG_PERF_EVENTS extern void perf_events_lapic_init(void); -#define PERF_EVENT_INDEX_OFFSET 0 - /* * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. * This flag is otherwise unused and ABI specified to be 0, so nobody should diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 0885561..02545e6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -680,6 +680,12 @@ struct pmu { * for each successful ->add() during the transaction. */ void (*cancel_txn) (struct pmu *pmu); /* optional */ + + /* + * Will return the value for perf_event_mmap_page::index for this event, + * if no implementation is provided it will default to: event->hw.idx + 1. 
+ */ + int (*event_idx) (struct perf_event *event); /* optional */ }; /** diff --git a/kernel/events/core.c b/kernel/events/core.c index 0ca1f64..3894309 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3208,10 +3208,6 @@ int perf_event_task_disable(void) return 0; } -#ifndef PERF_EVENT_INDEX_OFFSET -# define PERF_EVENT_INDEX_OFFSET 0 -#endif - static int perf_event_index(struct perf_event *event) { if (event->hw.state & PERF_HES_STOPPED) @@ -3220,7 +3216,7 @@ static int perf_event_index(struct perf_event *event) if (event->state != PERF_EVENT_STATE_ACTIVE) return 0; - return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; + return event->pmu->event_idx(event); } static void calc_timer_values(struct perf_event *event, @@ -4992,6 +4988,11 @@ static int perf_swevent_init(struct perf_event *event) return 0; } +static int perf_swevent_event_idx(struct perf_event *event) +{ + return 0; +} + static struct pmu perf_swevent = { .task_ctx_nr = perf_sw_context, @@ -5001,6 +5002,8 @@ static struct pmu perf_swevent = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, + + .event_idx = perf_swevent_event_idx, }; #ifdef CONFIG_EVENT_TRACING @@ -5087,6 +5090,8 @@ static struct pmu perf_tracepoint = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, + + .event_idx = perf_swevent_event_idx, }; static inline void perf_tp_register(void) @@ -5306,6 +5311,8 @@ static struct pmu perf_cpu_clock = { .start = cpu_clock_event_start, .stop = cpu_clock_event_stop, .read = cpu_clock_event_read, + + .event_idx = perf_swevent_event_idx, }; /* @@ -5378,6 +5385,8 @@ static struct pmu perf_task_clock = { .start = task_clock_event_start, .stop = task_clock_event_stop, .read = task_clock_event_read, + + .event_idx = perf_swevent_event_idx, }; static void perf_pmu_nop_void(struct pmu *pmu) @@ -5405,6 +5414,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu) perf_pmu_enable(pmu); } +static int perf_event_idx_default(struct perf_event *event) +{ + return event->hw.idx + 1; +} + /* * Ensures all contexts with the same task_ctx_nr have the same * pmu_cpu_context too. @@ -5594,6 +5608,9 @@ got_cpu_context: pmu->pmu_disable = perf_pmu_nop_void; } + if (!pmu->event_idx) + pmu->event_idx = perf_event_idx_default; + list_add_rcu(&pmu->entry, &pmus); ret = 0; unlock: diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index b7971d6..b0309f7 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -613,6 +613,11 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags) bp->hw.state = PERF_HES_STOPPED; } +static int hw_breakpoint_event_idx(struct perf_event *bp) +{ + return 0; +} + static struct pmu perf_breakpoint = { .task_ctx_nr = perf_sw_context, /* could eventually get its own */ @@ -622,6 +627,8 @@ static struct pmu perf_breakpoint = { .start = hw_breakpoint_start, .stop = hw_breakpoint_stop, .read = hw_breakpoint_pmu_read, + + .event_idx = hw_breakpoint_event_idx, }; int __init init_hw_breakpoint(void) -- cgit v0.10.2 From 365a4038486b57bb2bd516706a80f82f250f5306 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 21 Nov 2011 20:58:59 +0100 Subject: perf: Fix mmap_page::offset computation There are multiple reasons the counter might be unavailable; change the condition to !->index, since perf_event_index() should return 0 for all those cases.
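Seen from user space, the contract this establishes can be sketched as follows (a minimal sketch, assuming an rdpmc() wrapper like the one in the x86 test added later in this series; the complete protocol, including the seqlock retry loop, appears in that test):

	/*
	 * A zero index now uniformly means "no usable counter", so only
	 * the saved offset is reported; otherwise RDPMC on (index - 1)
	 * supplies the hardware delta accumulated since prev_count was
	 * snapshotted.
	 */
	static u64 read_self_count(struct perf_event_mmap_page *pc)
	{
		u64 count = pc->offset;

		if (pc->index)
			count += rdpmc(pc->index - 1);

		return count;
	}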
Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-1ixr3olci40w8rgv2evv2ldh@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/kernel/events/core.c b/kernel/events/core.c index 3894309..05affc3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3268,7 +3268,7 @@ void perf_event_update_userpage(struct perf_event *event) barrier(); userpg->index = perf_event_index(event); userpg->offset = perf_event_count(event); - if (event->state == PERF_EVENT_STATE_ACTIVE) + if (userpg->index) userpg->offset -= local64_read(&event->hw.prev_count); userpg->time_enabled = enabled + -- cgit v0.10.2 From fe4a330885aee20f233de36085fb15c38094e635 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 20 Nov 2011 20:44:06 +0100 Subject: perf, x86: Implement user-space RDPMC support, to allow fast, user-space access to self-monitoring counters Implement a correct pmu::event_idx for the x86 counter index rules and set CR4.PCE on CPU_STARTING. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Arun Sharma Cc: Thomas Gleixner Cc: "H. Peter Anvin" Link: http://lkml.kernel.org/n/tip-mwxab34dibqgzk5zywutfnha@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5adce10..53b5699 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1210,6 +1210,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) break; case CPU_STARTING: + set_in_cr4(X86_CR4_PCE); if (x86_pmu.cpu_starting) x86_pmu.cpu_starting(cpu); break; @@ -1542,6 +1543,18 @@ static int x86_pmu_event_init(struct perf_event *event) return err; } +static int x86_pmu_event_idx(struct perf_event *event) +{ + int idx = event->hw.idx; + + if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) { + idx -= X86_PMC_IDX_FIXED; + idx |= 1 << 30; + } + + return idx + 1; +} + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, @@ -1557,6 +1570,8 @@ static struct pmu pmu = { .start_txn = x86_pmu_start_txn, .cancel_txn = x86_pmu_cancel_txn, .commit_txn = x86_pmu_commit_txn, + + .event_idx = x86_pmu_event_idx, }; /* -- cgit v0.10.2 From 0c9d42ed4cee2aa1dfc3a260b741baae8615744f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 20 Nov 2011 23:30:47 +0100 Subject: perf, x86: Provide means for disabling userspace RDPMC Allow the disabling of RDPMC via a pmu specific attribute: echo 0 > /sys/bus/event_source/devices/cpu/rdpmc Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Arun Sharma Link: http://lkml.kernel.org/n/tip-pqeog465zo5hsimtkfz73f27@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 53b5699..116b040 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -1210,7 +1211,8 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) break; case CPU_STARTING: - set_in_cr4(X86_CR4_PCE); + if (x86_pmu.attr_rdpmc) + set_in_cr4(X86_CR4_PCE); if (x86_pmu.cpu_starting) x86_pmu.cpu_starting(cpu); break; @@ -1320,6 +1322,8 @@ static int __init init_hw_perf_events(void) } } + x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ + pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.cntval_bits); pr_info("... 
generic registers: %d\n", x86_pmu.num_counters); @@ -1555,10 +1559,59 @@ static int x86_pmu_event_idx(struct perf_event *event) return idx + 1; } +static ssize_t get_attr_rdpmc(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); +} + +static void change_rdpmc(void *info) +{ + bool enable = !!(unsigned long)info; + + if (enable) + set_in_cr4(X86_CR4_PCE); + else + clear_in_cr4(X86_CR4_PCE); +} + +static ssize_t set_attr_rdpmc(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val = simple_strtoul(buf, NULL, 0); + + if (!!val != !!x86_pmu.attr_rdpmc) { + x86_pmu.attr_rdpmc = !!val; + smp_call_function(change_rdpmc, (void *)val, 1); + } + + return count; +} + +static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc); + +static struct attribute *x86_pmu_attrs[] = { + &dev_attr_rdpmc.attr, + NULL, +}; + +static struct attribute_group x86_pmu_attr_group = { + .attrs = x86_pmu_attrs, +}; + +static const struct attribute_group *x86_pmu_attr_groups[] = { + &x86_pmu_attr_group, + NULL, +}; + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, + .attr_groups = x86_pmu_attr_groups, + .event_init = x86_pmu_event_init, .add = x86_pmu_add, diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 8944062..513d617 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -307,6 +307,14 @@ struct x86_pmu { struct x86_pmu_quirk *quirks; int perfctr_second_write; + /* + * sysfs attrs + */ + int attr_rdpmc; + + /* + * CPU Hotplug hooks + */ int (*cpu_prepare)(int cpu); void (*cpu_starting)(int cpu); void (*cpu_dying)(int cpu); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 02545e6..5311b79 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -615,6 +615,7 @@ struct pmu { struct list_head entry; struct device *dev; + const struct attribute_group **attr_groups; char *name; int type; diff --git a/kernel/events/core.c b/kernel/events/core.c index 05affc3..dcd4049e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5505,6 +5505,7 @@ static int pmu_dev_alloc(struct pmu *pmu) if (!pmu->dev) goto out; + pmu->dev->groups = pmu->attr_groups; device_initialize(pmu->dev); ret = dev_set_name(pmu->dev, "%s", pmu->name); if (ret) -- cgit v0.10.2 From e3f3541c19c89a4daae39300defba68943301949 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 21 Nov 2011 11:43:53 +0100 Subject: perf: Extend the mmap control page with time (TSC) fields Extend the mmap control page with fields so that userspace can compute time deltas relative to the provided time fields. Currently only implemented for x86 with constant and nonstop TSC. 
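On the user-space side the conversion is intended to work roughly like this (a sketch of the mult/shift arithmetic only, mirroring the computation the RDPMC test below performs inside its seqlock loop; the function name is illustrative):

	/*
	 * Convert a TSC reading into the kernel's perf clock domain.
	 * time_offset is chosen by the kernel so that the result is the
	 * nanosecond delta since the control page was last updated.
	 * Splitting cyc at time_shift keeps the 64-bit intermediate
	 * multiplication from overflowing.
	 */
	static u64 tsc_to_perf_time(u64 cyc, struct perf_event_mmap_page *pc)
	{
		u64 quot = cyc >> pc->time_shift;
		u64 rem = cyc & (((u64)1 << pc->time_shift) - 1);

		return pc->time_offset + quot * pc->time_mult +
		       ((rem * pc->time_mult) >> pc->time_shift);
	}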
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Arun Sharma Link: http://lkml.kernel.org/n/tip-3u1jucza77j3wuvs0x2bic0f@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 116b040..f8bddb5 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "perf_event.h" @@ -1627,6 +1628,19 @@ static struct pmu pmu = { .event_idx = x86_pmu_event_idx, }; +void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) +{ + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + return; + + if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + return; + + userpg->time_mult = this_cpu_read(cyc2ns); + userpg->time_shift = CYC2NS_SCALE_FACTOR; + userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; +} + /* * callchain support */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 5311b79..0b91db2 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -291,12 +291,14 @@ struct perf_event_mmap_page { __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ __u64 time_running; /* time event on cpu */ + __u32 time_mult, time_shift; + __u64 time_offset; /* * Hole for extension of the self monitor capabilities */ - __u64 __reserved[123]; /* align to 1k */ + __u64 __reserved[121]; /* align to 1k */ /* * Control data for the mmap() data buffer. diff --git a/kernel/events/core.c b/kernel/events/core.c index dcd4049e..3a9c7d8 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3220,17 +3220,22 @@ static int perf_event_index(struct perf_event *event) } static void calc_timer_values(struct perf_event *event, + u64 *now, u64 *enabled, u64 *running) { - u64 now, ctx_time; + u64 ctx_time; - now = perf_clock(); - ctx_time = event->shadow_ctx_time + now; + *now = perf_clock(); + ctx_time = event->shadow_ctx_time + *now; *enabled = ctx_time - event->tstamp_enabled; *running = ctx_time - event->tstamp_running; } +void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) +{ +} + /* * Callers need to ensure there can be no nesting of this function, otherwise * the seqlock logic goes bad. 
We can not serialize this because the arch @@ -3240,7 +3245,7 @@ void perf_event_update_userpage(struct perf_event *event) { struct perf_event_mmap_page *userpg; struct ring_buffer *rb; - u64 enabled, running; + u64 enabled, running, now; rcu_read_lock(); /* @@ -3252,7 +3257,7 @@ void perf_event_update_userpage(struct perf_event *event) * because of locking issue as we can be called in * NMI context */ - calc_timer_values(event, &enabled, &running); + calc_timer_values(event, &now, &enabled, &running); rb = rcu_dereference(event->rb); if (!rb) goto unlock; @@ -3277,6 +3282,8 @@ void perf_event_update_userpage(struct perf_event *event) userpg->time_running = running + atomic64_read(&event->child_total_time_running); + perf_update_user_clock(userpg, now); + barrier(); ++userpg->lock; preempt_enable(); @@ -3763,7 +3770,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, static void perf_output_read(struct perf_output_handle *handle, struct perf_event *event) { - u64 enabled = 0, running = 0; + u64 enabled = 0, running = 0, now; u64 read_format = event->attr.read_format; /* @@ -3776,7 +3783,7 @@ static void perf_output_read(struct perf_output_handle *handle, * NMI context */ if (read_format & PERF_FORMAT_TOTAL_TIMES) - calc_timer_values(event, &enabled, &running); + calc_timer_values(event, &now, &enabled, &running); if (event->attr.read_format & PERF_FORMAT_GROUP) perf_output_read_group(handle, event, enabled, running); -- cgit v0.10.2 From 08aa0d1f376e9b966568316bd2019b3c1274d885 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 21 Nov 2011 14:42:47 +0100 Subject: perf tools: Add x86 RDPMC, RDTSC test Implement a simple test for the self-monitoring data from the perf mmap data area control page: 6: x86 rdpmc test: 0: 6053 1: 60053 2: 600059 3: 6000059 4: 60000075 5: 600000247 Ok The counts are expected to increase monotonically - these are recovered via RDPMC, without calling into the kernel. It might be nice to add logic to automagically turn these numbers into OK/FAIL. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Cc: Arun Sharma Link: http://lkml.kernel.org/n/tip-evf5yii88ljdgmaihccbxxw1@git.kernel.org [ various small improvements ] Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 2b9a7f4..439b5ed 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -15,6 +15,8 @@ #include "util/thread_map.h" #include "../../include/linux/hw_breakpoint.h" +#include + static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) { bool *visited = symbol__priv(sym); @@ -1296,6 +1298,173 @@ out: return (err < 0 || errs > 0) ? 
-1 : 0; } + +#if defined(__x86_64__) || defined(__i386__) + +#define barrier() asm volatile("" ::: "memory") + +static u64 rdpmc(unsigned int counter) +{ + unsigned int low, high; + + asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); + + return low | ((u64)high) << 32; +} + +static u64 rdtsc(void) +{ + unsigned int low, high; + + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + + return low | ((u64)high) << 32; +} + +static u64 mmap_read_self(void *addr) +{ + struct perf_event_mmap_page *pc = addr; + u32 seq, idx, time_mult = 0, time_shift = 0; + u64 count, cyc = 0, time_offset = 0, enabled, running, delta; + + do { + seq = pc->lock; + barrier(); + + enabled = pc->time_enabled; + running = pc->time_running; + + if (enabled != running) { + cyc = rdtsc(); + time_mult = pc->time_mult; + time_shift = pc->time_shift; + time_offset = pc->time_offset; + } + + idx = pc->index; + count = pc->offset; + if (idx) + count += rdpmc(idx - 1); + + barrier(); + } while (pc->lock != seq); + + if (enabled != running) { + u64 quot, rem; + + quot = (cyc >> time_shift); + rem = cyc & ((1 << time_shift) - 1); + delta = time_offset + quot * time_mult + + ((rem * time_mult) >> time_shift); + + enabled += delta; + if (idx) + running += delta; + + quot = count / running; + rem = count % running; + count = quot * enabled + (rem * enabled) / running; + } + + return count; +} + +/* + * If the RDPMC instruction faults then signal this back to the test parent task: + */ +static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used) +{ + exit(-1); +} + +static int __test__rdpmc(void) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + volatile int tmp = 0; + u64 i, loops = 1000; + int n; + int fd; + void *addr; + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_INSTRUCTIONS, + .exclude_kernel = 1, + }; + u64 delta_sum = 0; + struct sigaction sa; + + sigfillset(&sa.sa_mask); + sa.sa_sigaction = segfault_handler; + sigaction(SIGSEGV, &sa, NULL); + + fprintf(stderr, "\n\n"); + + fd = sys_perf_event_open(&attr, 0, -1, -1, 0); + if (fd < 0) { + die("Error: sys_perf_event_open() syscall returned " + "with %d (%s)\n", fd, strerror(errno)); + } + + addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); + if (addr == (void *)(-1)) { + die("Error: mmap() syscall returned " + "with (%s)\n", strerror(errno)); + } + + for (n = 0; n < 6; n++) { + u64 stamp, now, delta; + + stamp = mmap_read_self(addr); + + for (i = 0; i < loops; i++) + tmp++; + + now = mmap_read_self(addr); + loops *= 10; + + delta = now - stamp; + fprintf(stderr, "%14d: %14Lu\n", n, (long long)delta); + + delta_sum += delta; + } + + munmap(addr, page_size); + close(fd); + + fprintf(stderr, " "); + + if (!delta_sum) + return -1; + + return 0; +} + +static int test__rdpmc(void) +{ + int status = 0; + int wret = 0; + int ret; + int pid; + + pid = fork(); + if (pid < 0) + return -1; + + if (!pid) { + ret = __test__rdpmc(); + + exit(ret); + } + + wret = waitpid(pid, &status, 0); + if (wret < 0 || status) + return -1; + + return 0; +} + +#endif + static struct test { const char *desc; int (*func)(void); @@ -1320,6 +1489,12 @@ static struct test { .desc = "parse events tests", .func = test__parse_events, }, +#if defined(__x86_64__) || defined(__i386__) + { + .desc = "x86 rdpmc test", + .func = test__rdpmc, + }, +#endif { .desc = "Validate PERF_RECORD_* events & perf_sample fields", .func = test__PERF_RECORD, @@ -1412,7 +1587,5 @@ int cmd_test(int argc, const char **argv, const char *prefix 
__used) if (symbol__init() < 0) return -1; - setup_pager(); - return __cmd_test(argc, argv); } -- cgit v0.10.2 From 6c303d3ab39f0dc69546f179c424ee1124f50906 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 22 Nov 2011 21:13:48 +0100 Subject: tracing: let trace_signal_generate() report more info, kill overflow_fail/lose_info __send_signal()->trace_signal_generate() doesn't report enough info. The users want to know whether the signal was actually delivered or not, and they also need the shared/private info. The patch moves trace_signal_generate() to the end of __send_signal() and adds the 2 additional arguments. This also allows us to kill trace_signal_overflow_fail/lose_info; we can simply add the appropriate TRACE_SIGNAL_ "result" codes. Reported-by: Seiji Aguchi Reviewed-by: Seiji Aguchi Acked-by: Steven Rostedt Signed-off-by: Oleg Nesterov diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index 17df434..39a8a43 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -23,11 +23,23 @@ } \ } while (0) +#ifndef TRACE_HEADER_MULTI_READ +enum { + TRACE_SIGNAL_DELIVERED, + TRACE_SIGNAL_IGNORED, + TRACE_SIGNAL_ALREADY_PENDING, + TRACE_SIGNAL_OVERFLOW_FAIL, + TRACE_SIGNAL_LOSE_INFO, +}; +#endif + /** * signal_generate - called when a signal is generated * @sig: signal number * @info: pointer to struct siginfo * @task: pointer to struct task_struct + * @group: shared or private + * @result: TRACE_SIGNAL_* * * Current process sends a 'sig' signal to 'task' process with * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV, @@ -37,9 +49,10 @@ */ TRACE_EVENT(signal_generate, - TP_PROTO(int sig, struct siginfo *info, struct task_struct *task), + TP_PROTO(int sig, struct siginfo *info, struct task_struct *task, + int group, int result), - TP_ARGS(sig, info, task), + TP_ARGS(sig, info, task, group, result), TP_STRUCT__entry( __field( int, sig ) @@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate, __field( int, code ) __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) + __field( int, group ) + __field( int, result ) ), TP_fast_assign( @@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate, TP_STORE_SIGINFO(__entry, info); memcpy(__entry->comm, task->comm, TASK_COMM_LEN); __entry->pid = task->pid; + __entry->group = group; + __entry->result = result; ), - TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d", + TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d", __entry->sig, __entry->errno, __entry->code, - __entry->comm, __entry->pid) + __entry->comm, __entry->pid, __entry->group, + __entry->result) ); /** @@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver, __entry->sa_handler, __entry->sa_flags) ); -DECLARE_EVENT_CLASS(signal_queue_overflow, - - TP_PROTO(int sig, int group, struct siginfo *info), - - TP_ARGS(sig, group, info), - - TP_STRUCT__entry( - __field( int, sig ) - __field( int, group ) - __field( int, errno ) - __field( int, code ) - ), - - TP_fast_assign( - __entry->sig = sig; - __entry->group = group; - TP_STORE_SIGINFO(__entry, info); - ), - - TP_printk("sig=%d group=%d errno=%d code=%d", - __entry->sig, __entry->group, __entry->errno, __entry->code) -); - -/** - * signal_overflow_fail - called when signal queue is overflow - * @sig: signal number - * @group: signal to process group or not (bool) - * @info: pointer to struct siginfo - * - * Kernel fails to generate 'sig' signal with 'info' siginfo, because - * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group. - * 'sig' is always one of RT signals. - */ -DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail, - - TP_PROTO(int sig, int group, struct siginfo *info), - - TP_ARGS(sig, group, info) -); - -/** - * signal_lose_info - called when siginfo is lost - * @sig: signal number - * @group: signal to process group or not (bool) - * @info: pointer to struct siginfo - * - * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo - * queue is overflow. - * 'group' is not 0 if the signal will be sent to a process group. - * 'sig' is always one of non-RT signals. - */ -DEFINE_EVENT(signal_queue_overflow, signal_lose_info, - - TP_PROTO(int sig, int group, struct siginfo *info), - - TP_ARGS(sig, group, info) -); - #endif /* _TRACE_SIGNAL_H */ /* This part must be outside protection */ diff --git a/kernel/signal.c b/kernel/signal.c index c73c428..1bd9e86 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1054,13 +1054,13 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, struct sigpending *pending; struct sigqueue *q; int override_rlimit; - - trace_signal_generate(sig, info, t); + int ret = 0, result; assert_spin_locked(&t->sighand->siglock); + result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, from_ancestor_ns)) - return 0; + goto ret; pending = group ? &t->signal->shared_pending : &t->pending; /* @@ -1068,8 +1068,11 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, * exactly one non-rt signal, so that we can get more * detailed information about the cause of the signal. */ + result = TRACE_SIGNAL_ALREADY_PENDING; if (legacy_queue(pending, sig)) - return 0; + goto ret; + + result = TRACE_SIGNAL_DELIVERED; /* * fast-pathed signals for kernel-internal things like SIGSTOP * or SIGKILL. @@ -1127,14 +1130,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, * signal was rt and sent by user using something * other than kill(). */ - trace_signal_overflow_fail(sig, group, info); - return -EAGAIN; + result = TRACE_SIGNAL_OVERFLOW_FAIL; + ret = -EAGAIN; + goto ret; } else { /* * This is a silent loss of information. We still * send the signal, but the *info bits are lost. */ - trace_signal_lose_info(sig, group, info); + result = TRACE_SIGNAL_LOSE_INFO; } } @@ -1142,7 +1146,9 @@ out_set: signalfd_notify(t, sig); sigaddset(&pending->signal, sig); complete_signal(sig, t, group); - return 0; +ret: + trace_signal_generate(sig, info, t, group, result); + return ret; } static int send_signal(int sig, struct siginfo *info, struct task_struct *t, -- cgit v0.10.2 From 163566f60bfe6a8176650155e2d98649b0dfabf8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 22 Nov 2011 21:37:41 +0100 Subject: tracing: send_sigqueue() needs trace_signal_generate() too Add trace_signal_generate() into send_sigqueue(). send_sigqueue() is very similar to __send_signal(); it just uses the preallocated info. It should do the same wrt tracing.
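With both paths instrumented, the result codes become visible through the regular tracing interface, along these lines (a hypothetical session with made-up field values; the record layout is the TP_printk format introduced by the previous patch, res holding one of the TRACE_SIGNAL_* codes):

	# echo 1 > /sys/kernel/debug/tracing/events/signal/signal_generate/enable
	# cat /sys/kernel/debug/tracing/trace_pipe
	... signal_generate: sig=2 errno=0 code=128 comm=bash pid=2864 grp=1 res=0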
Reported-by: Seiji Aguchi Reviewed-by: Seiji Aguchi Acked-by: Steven Rostedt Signed-off-by: Oleg Nesterov diff --git a/kernel/signal.c b/kernel/signal.c index 1bd9e86..8511e39 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1591,7 +1591,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) int sig = q->info.si_signo; struct sigpending *pending; unsigned long flags; - int ret; + int ret, result; BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); @@ -1600,6 +1600,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) goto ret; ret = 1; /* the signal is ignored */ + result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, 0)) goto out; @@ -1611,6 +1612,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) */ BUG_ON(q->info.si_code != SI_TIMER); q->info.si_overrun++; + result = TRACE_SIGNAL_ALREADY_PENDING; goto out; } q->info.si_overrun = 0; @@ -1620,7 +1622,9 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) list_add_tail(&q->list, &pending->list); sigaddset(&pending->signal, sig); complete_signal(sig, t, group); + result = TRACE_SIGNAL_DELIVERED; out: + trace_signal_generate(sig, &q->info, t, group, result); unlock_task_sighand(t, &flags); ret: return ret; -- cgit v0.10.2 From 9ae7d3351aac238eef9646479693105688fd9cc9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Jan 2012 14:07:23 -0200 Subject: perf tools: Add fprintf methods for thread_map and cpu_map classes For helping with debugging. Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-m06n4rp7pwr6dlzwoq89cl69@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 6893eec..adc72f0 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -166,6 +166,17 @@ out: return cpus; } +size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp) +{ + int i; + size_t printed = fprintf(fp, "%d cpu%s: ", + map->nr, map->nr > 1 ? "s" : ""); + for (i = 0; i < map->nr; ++i) + printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]); + + return printed + fprintf(fp, "\n"); +} + struct cpu_map *cpu_map__dummy_new(void) { struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int)); diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 072c0a3..c415185 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -1,6 +1,8 @@ #ifndef __PERF_CPUMAP_H #define __PERF_CPUMAP_H +#include + struct cpu_map { int nr; int map[]; @@ -10,4 +12,6 @@ struct cpu_map *cpu_map__new(const char *cpu_list); struct cpu_map *cpu_map__dummy_new(void); void cpu_map__delete(struct cpu_map *map); +size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); + #endif /* __PERF_CPUMAP_H */ diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index a5df131..894d52f 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -62,3 +62,14 @@ void thread_map__delete(struct thread_map *threads) { free(threads); } + +size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) +{ + int i; + size_t printed = fprintf(fp, "%d thread%s: ", + threads->nr, threads->nr > 1 ? "s" : ""); + for (i = 0; i < threads->nr; ++i) + printed += fprintf(fp, "%s%d", i ? 
", " : "", threads->map[i]); + + return printed + fprintf(fp, "\n"); +} diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h index 3cb9073..736ab4a26 100644 --- a/tools/perf/util/thread_map.h +++ b/tools/perf/util/thread_map.h @@ -2,6 +2,7 @@ #define __PERF_THREAD_MAP_H #include +#include struct thread_map { int nr; @@ -12,4 +13,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid); struct thread_map *thread_map__new_by_tid(pid_t tid); struct thread_map *thread_map__new(pid_t pid, pid_t tid); void thread_map__delete(struct thread_map *threads); + +size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); + #endif /* __PERF_THREAD_MAP_H */ -- cgit v0.10.2 From 0d37aa34f8806bb443dd3c8621fd9bdbb50c58bb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Jan 2012 14:08:15 -0200 Subject: perf tools: Introduce per user view The new --uid command line option will show only the tasks for a given user, using the proc interface to figure out the existing tasks. Kernel work is needed to close races at startup, but this should already be useful in many use cases. Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-bdnspm000gw2l984a2t53o8z@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 2937f7e..ff9a66e 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -58,6 +58,10 @@ OPTIONS --tid=:: Record events on existing thread ID. +-u:: +--uid=:: + Record events in threads owned by uid. Name or number. + -r:: --realtime=:: Collect data with this RT SCHED_FIFO priority. diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index b1a5bbb..ab1454e 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -78,6 +78,10 @@ Default is to monitor all CPUS. --tid=:: Profile events on existing thread ID. +-u:: +--uid=:: + Record events in threads owned by uid. Name or number. + -r :: --realtime=:: Collect data with this RT SCHED_FIFO priority. 
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 0abfb18..32870ee 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -44,6 +44,7 @@ struct perf_record { struct perf_evlist *evlist; struct perf_session *session; const char *progname; + const char *uid_str; int output; unsigned int page_size; int realtime_prio; @@ -727,6 +728,7 @@ const struct option record_options[] = { OPT_CALLBACK('G', "cgroup", &record.evlist, "name", "monitor event in cgroup name only", parse_cgroups), + OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"), OPT_END() }; @@ -748,7 +750,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 && - !rec->opts.system_wide && !rec->opts.cpu_list) + !rec->opts.system_wide && !rec->opts.cpu_list && !rec->uid_str) usage_with_options(record_usage, record_options); if (rec->force && rec->append_file) { @@ -788,11 +790,17 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) goto out_symbol_exit; } + rec->opts.uid = parse_target_uid(rec->uid_str, rec->opts.target_tid, + rec->opts.target_pid); + if (rec->uid_str != NULL && rec->opts.uid == UINT_MAX - 1) + goto out_free_fd; + if (rec->opts.target_pid != -1) rec->opts.target_tid = rec->opts.target_pid; if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid, - rec->opts.target_tid, rec->opts.cpu_list) < 0) + rec->opts.target_tid, rec->opts.uid, + rec->opts.cpu_list) < 0) usage_with_options(record_usage, record_options); list_for_each_entry(pos, &evsel_list->entries, node) { diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f5d2a63..459b862 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1201,7 +1201,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) if (target_pid != -1) target_tid = target_pid; - evsel_list->threads = thread_map__new(target_pid, target_tid); + evsel_list->threads = thread_map__new(target_pid, target_tid, UINT_MAX); if (evsel_list->threads == NULL) { pr_err("Problems finding threads of monitor\n"); usage_with_options(stat_usage, options); diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 3854e86..3ce709e 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -276,7 +276,7 @@ static int test__open_syscall_event(void) return -1; } - threads = thread_map__new(-1, getpid()); + threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); return -1; @@ -342,7 +342,7 @@ static int test__open_syscall_event_on_all_cpus(void) return -1; } - threads = thread_map__new(-1, getpid()); + threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); return -1; @@ -490,7 +490,7 @@ static int test__basic_mmap(void) expected_nr_events[i] = random() % 257; } - threads = thread_map__new(-1, getpid()); + threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); return -1; @@ -1054,7 +1054,7 @@ static int test__PERF_RECORD(void) * we're monitoring, the one forked there. 
*/ err = perf_evlist__create_maps(evlist, opts.target_pid, - opts.target_tid, opts.cpu_list); + opts.target_tid, UINT_MAX, opts.cpu_list); if (err < 0) { pr_debug("Not enough memory to create thread/cpu maps\n"); goto out_delete_evlist; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 8f80df8..e8b033c 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -64,7 +64,6 @@ #include #include - void get_term_dimensions(struct winsize *ws) { char *s = getenv("LINES"); @@ -537,10 +536,20 @@ static void perf_top__sort_new_samples(void *arg) static void *display_thread_tui(void *arg) { + struct perf_evsel *pos; struct perf_top *top = arg; const char *help = "For a higher level overview, try: perf top --sort comm,dso"; perf_top__sort_new_samples(top); + + /* + * Initialize the uid_filter_str; in the future the TUI will allow + * zooming in/out of UIDs. For now just use whatever the user passed + * via --uid. + */ + list_for_each_entry(pos, &top->evlist->entries, node) + pos->hists.uid_filter_str = top->uid_str; + perf_evlist__tui_browse_hists(top->evlist, help, perf_top__sort_new_samples, top, top->delay_secs); @@ -949,7 +958,7 @@ static int __cmd_top(struct perf_top *top) if (ret) goto out_delete; - if (top->target_tid != -1) + if (top->target_tid != -1 || top->uid != UINT_MAX) perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, perf_event__process, &top->session->host_machine); @@ -1089,6 +1098,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) .delay_secs = 2, .target_pid = -1, .target_tid = -1, + .uid = UINT_MAX, .freq = 1000, /* 1 KHz */ .sample_id_all_avail = true, .mmap_pages = 128, @@ -1162,6 +1172,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) "Display raw encoding of assembly instructions (default)"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g.
-M intel for intel syntax)"), + OPT_STRING('u', "uid", &top.uid_str, "user", "user to profile"), OPT_END() }; @@ -1187,6 +1198,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) setup_browser(false); + top.uid = parse_target_uid(top.uid_str, top.target_tid, top.target_pid); + if (top.uid_str != NULL && top.uid == UINT_MAX - 1) + goto out_delete_evlist; + /* CPU and PID are mutually exclusive */ if (top.target_tid > 0 && top.cpu_list) { printf("WARNING: PID switch overriding CPU\n"); @@ -1198,7 +1213,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) top.target_tid = top.target_pid; if (perf_evlist__create_maps(top.evlist, top.target_pid, - top.target_tid, top.cpu_list) < 0) + top.target_tid, top.uid, top.cpu_list) < 0) usage_with_options(top_usage, options); if (!top.evlist->nr_entries && @@ -1262,6 +1277,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) status = __cmd_top(&top); +out_delete_evlist: perf_evlist__delete(top.evlist); return status; diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 64f8bee..92af168 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -188,6 +188,7 @@ void pthread__unblock_sigwinch(void); struct perf_record_opts { pid_t target_pid; pid_t target_tid; + uid_t uid; bool call_graph; bool group; bool inherit_stat; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 3f16e08..a6d50e3 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -594,14 +594,14 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, } int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, - pid_t target_tid, const char *cpu_list) + pid_t target_tid, uid_t uid, const char *cpu_list) { - evlist->threads = thread_map__new(target_pid, target_tid); + evlist->threads = thread_map__new(target_pid, target_tid, uid); if (evlist->threads == NULL) return -1; - if (cpu_list == NULL && target_tid != -1) + if (uid != UINT_MAX || (cpu_list == NULL && target_tid != -1)) evlist->cpus = cpu_map__dummy_new(); else evlist->cpus = cpu_map__new(cpu_list); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 8922aee..9c51660 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -107,7 +107,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, } int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, - pid_t target_tid, const char *cpu_list); + pid_t tid, uid_t uid, const char *cpu_list); void perf_evlist__delete_maps(struct perf_evlist *evlist); int perf_evlist__set_filters(struct perf_evlist *evlist); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index f55f0a8d..0d48613 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -55,6 +55,7 @@ struct hists { u64 nr_entries; const struct thread *thread_filter; const struct dso *dso_filter; + const char *uid_filter_str; pthread_mutex_t lock; struct events_stats stats; u64 event_stream; diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 9dd47a4..e03b58a 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -425,14 +425,14 @@ struct pyrf_thread_map { static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, PyObject *args, PyObject *kwargs) { - static char *kwlist[] = { "pid", "tid", NULL }; - int pid = -1, tid = -1; + static char *kwlist[] = { "pid", "tid", "uid", NULL }; + int pid = -1, tid = -1, uid = UINT_MAX; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", - 
kwlist, &pid, &tid)) + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii", + kwlist, &pid, &tid, &uid)) return -1; - pthreads->threads = thread_map__new(pid, tid); + pthreads->threads = thread_map__new(pid, tid, uid); if (pthreads->threads == NULL) return -1; return 0; diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index 894d52f..3d4b6c5 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -1,6 +1,11 @@ #include +#include +#include #include #include +#include +#include +#include #include "thread_map.h" /* Skip "." and ".." directories */ @@ -23,7 +28,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid) sprintf(name, "/proc/%d/task", pid); items = scandir(name, &namelist, filter, NULL); if (items <= 0) - return NULL; + return NULL; threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); if (threads != NULL) { @@ -51,10 +56,99 @@ struct thread_map *thread_map__new_by_tid(pid_t tid) return threads; } -struct thread_map *thread_map__new(pid_t pid, pid_t tid) +struct thread_map *thread_map__new_by_uid(uid_t uid) +{ + DIR *proc; + int max_threads = 32, items, i; + char path[256]; + struct dirent dirent, *next, **namelist = NULL; + struct thread_map *threads = malloc(sizeof(*threads) + + max_threads * sizeof(pid_t)); + if (threads == NULL) + goto out; + + proc = opendir("/proc"); + if (proc == NULL) + goto out_free_threads; + + threads->nr = 0; + + while (!readdir_r(proc, &dirent, &next) && next) { + char *end; + bool grow = false; + struct stat st; + pid_t pid = strtol(dirent.d_name, &end, 10); + + if (*end) /* only interested in proper numerical dirents */ + continue; + + snprintf(path, sizeof(path), "/proc/%s", dirent.d_name); + + if (stat(path, &st) != 0) + continue; + + if (st.st_uid != uid) + continue; + + snprintf(path, sizeof(path), "/proc/%d/task", pid); + items = scandir(path, &namelist, filter, NULL); + if (items <= 0) + goto out_free_closedir; + + while (threads->nr + items >= max_threads) { + max_threads *= 2; + grow = true; + } + + if (grow) { + struct thread_map *tmp; + + tmp = realloc(threads, (sizeof(*threads) + + max_threads * sizeof(pid_t))); + if (tmp == NULL) + goto out_free_namelist; + + threads = tmp; + } + + for (i = 0; i < items; i++) + threads->map[threads->nr + i] = atoi(namelist[i]->d_name); + + for (i = 0; i < items; i++) + free(namelist[i]); + free(namelist); + + threads->nr += items; + } + +out_closedir: + closedir(proc); +out: + return threads; + +out_free_threads: + free(threads); + return NULL; + +out_free_namelist: + for (i = 0; i < items; i++) + free(namelist[i]); + free(namelist); + +out_free_closedir: + free(threads); + threads = NULL; + goto out_closedir; +} + +struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid) { if (pid != -1) return thread_map__new_by_pid(pid); + + if (tid == -1 && uid != UINT_MAX) + return thread_map__new_by_uid(uid); + return thread_map__new_by_tid(tid); } diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h index 736ab4a26..c75ddba 100644 --- a/tools/perf/util/thread_map.h +++ b/tools/perf/util/thread_map.h @@ -11,7 +11,8 @@ struct thread_map { struct thread_map *thread_map__new_by_pid(pid_t pid); struct thread_map *thread_map__new_by_tid(pid_t tid); -struct thread_map *thread_map__new(pid_t pid, pid_t tid); +struct thread_map *thread_map__new_by_uid(uid_t uid); +struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); void thread_map__delete(struct thread_map *threads); size_t thread_map__fprintf(struct thread_map *threads, 
FILE *fp); diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index 500471d..e4370ca 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c @@ -75,6 +75,9 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) else if (top->target_tid != -1) ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d", top->target_tid); + else if (top->uid_str != NULL) + ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", + top->uid_str); else ret += SNPRINTF(bf + ret, size - ret, " (all"); diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index a248f3c..def3e53 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -24,6 +24,7 @@ struct perf_top { int print_entries, count_filter, delay_secs; int freq; pid_t target_pid, target_tid; + uid_t uid; bool hide_kernel_symbols, hide_user_symbols, zero; bool system_wide; bool use_tui, use_stdio; @@ -45,6 +46,7 @@ struct perf_top { int realtime_prio; int sym_pcnt_filter; const char *sym_filter; + const char *uid_str; }; size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index 1212a38..7b6669d 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c @@ -841,6 +841,9 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size, nr_events = convert_unit(nr_events, &unit); printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name); + if (self->uid_filter_str) + printed += snprintf(bf + printed, size - printed, + ", UID: %s", self->uid_filter_str); if (thread) printed += snprintf(bf + printed, size - printed, ", Thread: %s(%d)", diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c index d76d1c0..d0c0139 100644 --- a/tools/perf/util/usage.c +++ b/tools/perf/util/usage.c @@ -7,6 +7,7 @@ * Copyright (C) Linus Torvalds, 2005 */ #include "util.h" +#include "debug.h" static void report(const char *prefix, const char *err, va_list params) { @@ -81,3 +82,41 @@ void warning(const char *warn, ...) 
warn_routine(warn, params); va_end(params); } + +uid_t parse_target_uid(const char *str, pid_t tid, pid_t pid) +{ + struct passwd pwd, *result; + char buf[1024]; + + if (str == NULL) + return UINT_MAX; + + /* CPU and PID are mutually exclusive */ + if (tid > 0 || pid > 0) { + ui__warning("PID/TID switch overriding UID\n"); + sleep(1); + return UINT_MAX; + } + + getpwnam_r(str, &pwd, buf, sizeof(buf), &result); + + if (result == NULL) { + char *endptr; + int uid = strtol(str, &endptr, 10); + + if (*endptr != '\0') { + ui__error("Invalid user %s\n", str); + return UINT_MAX - 1; + } + + getpwuid_r(uid, &pwd, buf, sizeof(buf), &result); + + if (result == NULL) { + ui__error("Problems obtaining information for user %s\n", + str); + return UINT_MAX - 1; + } + } + + return result->pw_uid; +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index b9c530c..061dbf8 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -246,6 +246,8 @@ struct perf_event_attr; void event_attr_init(struct perf_event_attr *attr); +uid_t parse_target_uid(const char *str, pid_t tid, pid_t pid); + #define _STR(x) #x #define STR(x) _STR(x) -- cgit v0.10.2 From 9ea811973d49a1df0be04ff6e4df449e4fca4fb5 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 18 Jan 2012 13:28:13 +0000 Subject: perf bench: Make "default" memcpy() selection actually use glibc's implementation Since arch/x86/lib/memcpy_64.S implements not only __memcpy, but also memcpy, without further precautions this function will get chosen by the static linker for resolving all references, and hence the "default" measurement didn't really measure anything other than the "x86-64-unrolled" one. Fix this by renaming (through the pre-processor) the conflicting symbol. On my Westmere system, the glibc variant turns out to require about 4% fewer instructions, but 15% more cycles for the default 1Mb block size measured. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/4F16D6FD020000780006D72F@nat28.tlf.novell.com Signed-off-by: Jan Beulich Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S index a57b66e..384b607 100644 --- a/tools/perf/bench/mem-memcpy-x86-64-asm.S +++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S @@ -1,2 +1,2 @@ - +#define memcpy MEMCPY /* don't hide glibc's memcpy() */ #include "../../../arch/x86/lib/memcpy_64.S" -- cgit v0.10.2 From 800eb01484b3ca1eaf4eb5186df13fb24de2db19 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 18 Jan 2012 13:28:56 +0000 Subject: perf bench: Also allow measuring alternative memcpy implementations Intended to be able to support the current selection of the preferred memcpy() implementation, this patch adds the ability to also measure the two alternative implementations, again by way of using some pre-processor replacement. While on my Westmere system this proves that the movsb based variant is worse than the movsq based one (since the ERMS feature isn't there), it also shows that here for the default as well as small sizes the unrolled variant outperforms the movsq one.
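The three variants can then be compared head to head, along the lines of the following (hypothetical invocations, assuming the same -l/-r length/routine options that the memset clone later in this series shows; the routine names are the ones registered in mem-memcpy-x86-64-asm-def.h below):

	$ perf bench mem memcpy -l 1MB -r x86-64-unrolled
	$ perf bench mem memcpy -l 1MB -r x86-64-movsq
	$ perf bench mem memcpy -l 1MB -r x86-64-movsb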
Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/4F16D728020000780006D732@nat28.tlf.novell.com Signed-off-by: Jan Beulich Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm-def.h b/tools/perf/bench/mem-memcpy-x86-64-asm-def.h index d588b87..d66ab79 100644 --- a/tools/perf/bench/mem-memcpy-x86-64-asm-def.h +++ b/tools/perf/bench/mem-memcpy-x86-64-asm-def.h @@ -2,3 +2,11 @@ MEMCPY_FN(__memcpy, "x86-64-unrolled", "unrolled memcpy() in arch/x86/lib/memcpy_64.S") + +MEMCPY_FN(memcpy_c, + "x86-64-movsq", + "movsq-based memcpy() in arch/x86/lib/memcpy_64.S") + +MEMCPY_FN(memcpy_c_e, + "x86-64-movsb", + "movsb-based memcpy() in arch/x86/lib/memcpy_64.S") diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S index 384b607..a20780b 100644 --- a/tools/perf/bench/mem-memcpy-x86-64-asm.S +++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S @@ -1,2 +1,6 @@ #define memcpy MEMCPY /* don't hide glibc's memcpy() */ +#define altinstr_replacement text +#define globl p2align 4; .globl +#define Lmemcpy_c globl memcpy_c; memcpy_c +#define Lmemcpy_c_e globl memcpy_c_e; memcpy_c_e #include "../../../arch/x86/lib/memcpy_64.S" -- cgit v0.10.2 From be3de80dc2e671d9ee15e69fe9cd84d2b71e2225 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 24 Jan 2012 10:03:22 -0200 Subject: perf bench: Also allow measuring memset() This simply clones the respective memcpy() implementation. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/4F16D743020000780006D735@nat28.tlf.novell.com Signed-off-by: Jan Beulich Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index ac86d67..599031a 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -61,7 +61,7 @@ ifeq ($(ARCH),x86_64) ifeq (${IS_X86_64}, 1) RAW_ARCH := x86_64 ARCH_CFLAGS := -DARCH_X86_64 - ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S + ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S endif endif @@ -362,8 +362,10 @@ BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o ifeq ($(RAW_ARCH),x86_64) BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o +BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o endif BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o +BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o BUILTIN_OBJS += $(OUTPUT)builtin-diff.o BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index f7781c6..a09bece 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -4,6 +4,7 @@ extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used); +extern int bench_mem_memset(int argc, const char **argv, const char *prefix); #define BENCH_FORMAT_DEFAULT_STR "default" #define BENCH_FORMAT_DEFAULT 0 diff --git a/tools/perf/bench/mem-memset-arch.h b/tools/perf/bench/mem-memset-arch.h new file mode 100644 index 0000000..a040fa7 --- /dev/null +++ b/tools/perf/bench/mem-memset-arch.h @@ -0,0 +1,12 @@ + +#ifdef ARCH_X86_64 + +#define MEMSET_FN(fn, name, desc) \ + extern void *fn(void *, int, size_t); + +#include "mem-memset-x86-64-asm-def.h" + +#undef MEMSET_FN + +#endif + diff --git a/tools/perf/bench/mem-memset-x86-64-asm-def.h 
b/tools/perf/bench/mem-memset-x86-64-asm-def.h new file mode 100644 index 0000000..a71dff9 --- /dev/null +++ b/tools/perf/bench/mem-memset-x86-64-asm-def.h @@ -0,0 +1,12 @@ + +MEMSET_FN(__memset, + "x86-64-unrolled", + "unrolled memset() in arch/x86/lib/memset_64.S") + +MEMSET_FN(memset_c, + "x86-64-stosq", + "stosq-based memset() in arch/x86/lib/memset_64.S") + +MEMSET_FN(memset_c_e, + "x86-64-stosb", + "stosb-based memset() in arch/x86/lib/memset_64.S") diff --git a/tools/perf/bench/mem-memset-x86-64-asm.S b/tools/perf/bench/mem-memset-x86-64-asm.S new file mode 100644 index 0000000..cb92170 --- /dev/null +++ b/tools/perf/bench/mem-memset-x86-64-asm.S @@ -0,0 +1,6 @@ +#define memset MEMSET /* don't hide glibc's memset() */ +#define altinstr_replacement text +#define globl p2align 4; .globl +#define Lmemset_c globl memset_c; memset_c +#define Lmemset_c_e globl memset_c_e; memset_c_e +#include "../../../arch/x86/lib/memset_64.S" diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c new file mode 100644 index 0000000..9c0c6f0 --- /dev/null +++ b/tools/perf/bench/mem-memset.c @@ -0,0 +1,291 @@ +/* + * mem-memset.c + * + * memset: Simple memory set in various ways + * + * Trivial clone of mem-memcpy.c. + */ +#include <ctype.h> + +#include "../perf.h" +#include "../util/util.h" +#include "../util/parse-options.h" +#include "../util/header.h" +#include "bench.h" +#include "mem-memset-arch.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/time.h> +#include <errno.h> + +#define K 1024 + +static const char *length_str = "1MB"; +static const char *routine = "default"; +static bool use_clock; +static int clock_fd; +static bool only_prefault; +static bool no_prefault; + +static const struct option options[] = { + OPT_STRING('l', "length", &length_str, "1MB", + "Specify length of memory to set. " + "available unit: B, MB, GB (upper and lower)"), + OPT_STRING('r', "routine", &routine, "default", + "Specify routine to set"), + OPT_BOOLEAN('c', "clock", &use_clock, + "Use CPU clock for measuring"), + OPT_BOOLEAN('o', "only-prefault", &only_prefault, + "Show only the result with page faults before memset()"), + OPT_BOOLEAN('n', "no-prefault", &no_prefault, + "Show only the result without page faults before memset()"), + OPT_END() +}; + +typedef void *(*memset_t)(void *, int, size_t); + +struct routine { + const char *name; + const char *desc; + memset_t fn; +}; + +static const struct routine routines[] = { + { "default", + "Default memset() provided by glibc", + memset }, +#ifdef ARCH_X86_64 + +#define MEMSET_FN(fn, name, desc) { name, desc, fn }, +#include "mem-memset-x86-64-asm-def.h" +#undef MEMSET_FN + +#endif + + { NULL, + NULL, + NULL } +}; + +static const char * const bench_mem_memset_usage[] = { + "perf bench mem memset <options>", + NULL +}; + +static struct perf_event_attr clock_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES +}; + +static void init_clock(void) +{ + clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0); + + if (clock_fd < 0 && errno == ENOSYS) + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); + else + BUG_ON(clock_fd < 0); +} + +static u64 get_clock(void) +{ + int ret; + u64 clk; + + ret = read(clock_fd, &clk, sizeof(u64)); + BUG_ON(ret != sizeof(u64)); + + return clk; +} + +static double timeval2double(struct timeval *ts) +{ + return (double)ts->tv_sec + + (double)ts->tv_usec / (double)1000000; +} + +static void alloc_mem(void **dst, size_t length) +{ + *dst = zalloc(length); + if (!*dst) + die("memory allocation failed - maybe length is too large?\n"); +} + +static u64 do_memset_clock(memset_t fn, size_t len, bool prefault) +{ + u64 clock_start = 0ULL, clock_end = 0ULL; + void *dst = NULL; + + alloc_mem(&dst, len); + + if (prefault) + fn(dst, -1, len); + + clock_start = get_clock(); + fn(dst, 0, len); + clock_end = get_clock(); + + free(dst); + return clock_end - clock_start; +} + +static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault) +{ + struct timeval tv_start, tv_end, tv_diff; + void *dst = NULL; + + alloc_mem(&dst, len); + + if (prefault) + fn(dst, -1, len); + + BUG_ON(gettimeofday(&tv_start, NULL)); + fn(dst, 0, len); + BUG_ON(gettimeofday(&tv_end, NULL)); + + timersub(&tv_end, &tv_start, &tv_diff); + + free(dst); + return (double)((double)len / timeval2double(&tv_diff)); +} + +#define pf (no_prefault ? 0 : 1) + +#define print_bps(x) do { \ + if (x < K) \ + printf(" %14lf B/Sec", x); \ + else if (x < K * K) \ + printf(" %14lf KB/Sec", x / K); \ + else if (x < K * K * K) \ + printf(" %14lf MB/Sec", x / K / K); \ + else \ + printf(" %14lf GB/Sec", x / K / K / K); \ + } while (0) + +int bench_mem_memset(int argc, const char **argv, + const char *prefix __used) +{ + int i; + size_t len; + double result_bps[2]; + u64 result_clock[2]; + + argc = parse_options(argc, argv, options, + bench_mem_memset_usage, 0); + + if (use_clock) + init_clock(); + + len = (size_t)perf_atoll((char *)length_str); + + result_clock[0] = result_clock[1] = 0ULL; + result_bps[0] = result_bps[1] = 0.0; + + if ((s64)len <= 0) { + fprintf(stderr, "Invalid length:%s\n", length_str); + return 1; + } + + /* same as specifying neither of prefault and no-prefault */ + if (only_prefault && no_prefault) + only_prefault = no_prefault = false; + + for (i = 0; routines[i].name; i++) { + if (!strcmp(routines[i].name, routine)) + break; + } + if (!routines[i].name) { + printf("Unknown routine:%s\n", routine); + printf("Available routines...\n"); + for (i = 0; routines[i].name; i++) { + printf("\t%s ... %s\n", + routines[i].name, routines[i].desc); + } + return 1; + } + + if (bench_format == BENCH_FORMAT_DEFAULT) + printf("# Setting %s Bytes ...\n\n", length_str); + + if (!only_prefault && !no_prefault) { + /* show both results */ + if (use_clock) { + result_clock[0] = + do_memset_clock(routines[i].fn, len, false); + result_clock[1] = + do_memset_clock(routines[i].fn, len, true); + } else { + result_bps[0] = + do_memset_gettimeofday(routines[i].fn, + len, false); + result_bps[1] = + do_memset_gettimeofday(routines[i].fn, + len, true); + } + } else { + if (use_clock) { + result_clock[pf] = + do_memset_clock(routines[i].fn, + len, only_prefault); + } else { + result_bps[pf] = + do_memset_gettimeofday(routines[i].fn, + len, only_prefault); + } + } + + switch (bench_format) { + case BENCH_FORMAT_DEFAULT: + if (!only_prefault && !no_prefault) { + if (use_clock) { + printf(" %14lf Clock/Byte\n", + (double)result_clock[0] + / (double)len); + printf(" %14lf Clock/Byte (with prefault)\n ", + (double)result_clock[1] + / (double)len); + } else { + print_bps(result_bps[0]); + printf("\n"); + print_bps(result_bps[1]); + printf(" (with prefault)\n"); + } + } else { + if (use_clock) { + printf(" %14lf Clock/Byte", + (double)result_clock[pf] + / (double)len); + } else + print_bps(result_bps[pf]); + + printf("%s\n", only_prefault ?
" (with prefault)" : ""); + } + break; + case BENCH_FORMAT_SIMPLE: + if (!only_prefault && !no_prefault) { + if (use_clock) { + printf("%lf %lf\n", + (double)result_clock[0] / (double)len, + (double)result_clock[1] / (double)len); + } else { + printf("%lf %lf\n", + result_bps[0], result_bps[1]); + } + } else { + if (use_clock) { + printf("%lf\n", (double)result_clock[pf] + / (double)len); + } else + printf("%lf\n", result_bps[pf]); + } + break; + default: + /* reaching this means there's some disaster: */ + die("unknown format: %d\n", bench_format); + break; + } + + return 0; +} diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index fcb9626..b0e74ab 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -52,6 +52,9 @@ static struct bench_suite mem_suites[] = { { "memcpy", "Simple memory copy in various ways", bench_mem_memcpy }, + { "memset", + "Simple memory set in various ways", + bench_mem_memset }, suite_all, { NULL, NULL, diff --git a/tools/perf/util/include/asm/dwarf2.h b/tools/perf/util/include/asm/dwarf2.h index bb4198e..afe3819 100644 --- a/tools/perf/util/include/asm/dwarf2.h +++ b/tools/perf/util/include/asm/dwarf2.h @@ -2,10 +2,12 @@ #ifndef PERF_DWARF2_H #define PERF_DWARF2_H -/* dwarf2.h ... dummy header file for including arch/x86/lib/memcpy_64.S */ +/* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */ #define CFI_STARTPROC #define CFI_ENDPROC +#define CFI_REMEMBER_STATE +#define CFI_RESTORE_STATE #endif /* PERF_DWARF2_H */ -- cgit v0.10.2 From e3e877e79b7c6a322f9f628e87052c13581238cc Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 18 Jan 2012 13:29:59 +0000 Subject: perf bench: Allow passing an iteration count to "bench mem mem{cpy,set}" "perf stat ... perf bench mem mem..." is pretty meaningless when using small block sizes (as the overhead of the invocation of each test run basically hides the actual test result in the noise). Repeating the actually interesting function's invocation a number of times allows the results to become meaningful. 
Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/4F16D767020000780006D738@nat28.tlf.novell.com Signed-off-by: Jan Beulich Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index db82021..6ad2b1c 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -24,6 +24,7 @@ static const char *length_str = "1MB"; static const char *routine = "default"; +static int iterations = 1; static bool use_clock; static int clock_fd; static bool only_prefault; @@ -35,6 +36,8 @@ static const struct option options[] = { "available unit: B, MB, GB (upper and lower)"), OPT_STRING('r', "routine", &routine, "default", "Specify routine to copy"), + OPT_INTEGER('i', "iterations", &iterations, + "repeat memcpy() invocation this number of times"), OPT_BOOLEAN('c', "clock", &use_clock, "Use CPU clock for measuring"), OPT_BOOLEAN('o', "only-prefault", &only_prefault, @@ -121,6 +124,7 @@ static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault) { u64 clock_start = 0ULL, clock_end = 0ULL; void *src = NULL, *dst = NULL; + int i; alloc_mem(&src, &dst, len); @@ -128,7 +132,8 @@ static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault) fn(dst, src, len); clock_start = get_clock(); - fn(dst, src, len); + for (i = 0; i < iterations; ++i) + fn(dst, src, len); clock_end = get_clock(); free(src); @@ -140,6 +145,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault) { struct timeval tv_start, tv_end, tv_diff; void *src = NULL, *dst = NULL; + int i; alloc_mem(&src, &dst, len); @@ -147,7 +153,8 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault) fn(dst, src, len); BUG_ON(gettimeofday(&tv_start, NULL)); - fn(dst, src, len); + for (i = 0; i < iterations; ++i) + fn(dst, src, len); BUG_ON(gettimeofday(&tv_end, NULL)); timersub(&tv_end, &tv_start, &tv_diff); diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c index 9c0c6f0..59d4933 100644 --- a/tools/perf/bench/mem-memset.c +++ b/tools/perf/bench/mem-memset.c @@ -24,6 +24,7 @@ static const char *length_str = "1MB"; static const char *routine = "default"; +static int iterations = 1; static bool use_clock; static int clock_fd; static bool only_prefault; @@ -35,6 +36,8 @@ static const struct option options[] = { "available unit: B, MB, GB (upper and lower)"), OPT_STRING('r', "routine", &routine, "default", "Specify routine to copy"), + OPT_INTEGER('i', "iterations", &iterations, + "repeat memset() invocation this number of times"), OPT_BOOLEAN('c', "clock", &use_clock, "Use CPU clock for measuring"), OPT_BOOLEAN('o', "only-prefault", &only_prefault, @@ -117,6 +120,7 @@ static u64 do_memset_clock(memset_t fn, size_t len, bool prefault) { u64 clock_start = 0ULL, clock_end = 0ULL; void *dst = NULL; + int i; alloc_mem(&dst, len); @@ -124,7 +128,8 @@ static u64 do_memset_clock(memset_t fn, size_t len, bool prefault) fn(dst, -1, len); clock_start = get_clock(); - fn(dst, 0, len); + for (i = 0; i < iterations; ++i) + fn(dst, i, len); clock_end = get_clock(); free(dst); @@ -135,6 +140,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault) { struct timeval tv_start, tv_end, tv_diff; void *dst = NULL; + int i; alloc_mem(&dst, len); @@ -142,7 +148,8 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault) fn(dst, -1, len); BUG_ON(gettimeofday(&tv_start, NULL)); - fn(dst, 0, len); + for (i = 0; i < iterations; ++i) + fn(dst, i, len); 
BUG_ON(gettimeofday(&tv_end, NULL)); timersub(&tv_end, &tv_start, &tv_diff); -- cgit v0.10.2 From 2ef1ea3826434bdebe17b2941356a8f764ff5fcd Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 17 Jan 2012 13:41:01 -0800 Subject: perf tools: Fix broken build by defining _GNU_SOURCE in Makefile When building on my Debian/mips system, util/util.c fails to build because commit 1aed2671738785e8f5aea663a6fda91aa7ef59b5 (perf kvm: Do guest-only counting by default) indirectly includes stdio.h before the feature selection in util.h is done. This prevents _GNU_SOURCE in util.h from enabling the declaration of getline(), from now second inclusion of stdio.h, and the build is broken. There is another breakage in util/evsel.c caused by include ordering, but I didn't fully track down the commit that caused it. The root cause of all this is an inconsistent definition of _GNU_SOURCE, so I move the definition into the Makefile so that it is passed to all invocations of the compiler and used uniformly for all system header files. All other #define and #undef of _GNU_SOURCE are removed as they cause conflicts with the definition passed to the compiler. All the features.h definitions (_LARGEFILE64_SOURCE _FILE_OFFSET_BITS=64 and _GNU_SOURCE) are needed by the python glue code too, so they are moved to BASIC_CFLAGS, and the misleading comments about BASIC_CFLAGS are removed. This gives me a clean build on x86_64 (fc12) and mips (Debian). Cc: David Daney Cc: Ingo Molnar Cc: Joerg Roedel Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1326836461-11952-1-git-send-email-ddaney.cavm@gmail.com Signed-off-by: David Daney Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 599031a..d64f581 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -104,7 +104,7 @@ endif CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) EXTLIBS = -lpthread -lrt -lelf -lm -ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 +ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE ALL_LDFLAGS = $(LDFLAGS) STRIP ?= strip @@ -168,10 +168,7 @@ endif ### --- END CONFIGURATION SECTION --- -# Those must not be GNU-specific; they are shared with perl/ which may -# be built by a different compiler. (Note that this is an artifact now -# but it still might be nice to keep that distinction.) -BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include +BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE BASIC_LDFLAGS = # Guard against environment variables diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 59d43ab..fb85661 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -20,7 +20,6 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* */ -#define _GNU_SOURCE #include #include #include @@ -31,7 +30,6 @@ #include #include -#undef _GNU_SOURCE #include "perf.h" #include "builtin.h" #include "util/util.h" diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index eb25900..29cb654 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -19,7 +19,6 @@ * */ -#define _GNU_SOURCE #include #include #include @@ -33,7 +32,6 @@ #include #include -#undef _GNU_SOURCE #include "util.h" #include "event.h" #include "string.h" diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 215d50f..0975438 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1,4 +1,3 @@ -#define _GNU_SOURCE #include #include #include diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 6c164dc..1a8d4dc 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -21,14 +21,13 @@ * The parts for function graph printing was taken and modified from the * Linux Kernel that were written by Frederic Weisbecker. */ -#define _GNU_SOURCE + #include #include #include #include #include -#undef _GNU_SOURCE #include "../perf.h" #include "util.h" #include "trace-event.h" diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index 7b6669d..bfba049 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c @@ -1,6 +1,4 @@ -#define _GNU_SOURCE #include -#undef _GNU_SOURCE #include "../libslang.h" #include #include diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c index 6ef3c56..4f48f59 100644 --- a/tools/perf/util/ui/helpline.c +++ b/tools/perf/util/ui/helpline.c @@ -1,4 +1,3 @@ -#define _GNU_SOURCE #include #include #include diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 061dbf8..232d17e 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -40,7 +40,6 @@ #define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) #define _ALL_SOURCE 1 -#define _GNU_SOURCE 1 #define _BSD_SOURCE 1 #define HAS_BOOL -- cgit v0.10.2 From a844d1ef09872a8b8c66d431edd1b8a943e51c7a Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 20 Jan 2012 17:43:54 +0530 Subject: perf probe: Usability fixes Ingo pointed out few perf probe usability related errors during his review of uprobes. Since these issues are independent of uprobes, fixing them in a separate patch. Suggested-by: Ingo Molnar Acked-by: Masami Hiramatsu Cc: Masami Hiramatsu Link: http://lkml.kernel.org/r/20120120121354.GL15447@linux.vnet.ibm.com Signed-off-by: Srikar Dronamraju Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 29cb654..b9bbdd2 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1729,7 +1729,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, } ret = 0; - printf("Add new event%s\n", (ntevs > 1) ? "s:" : ":"); + printf("Added new event%s\n", (ntevs > 1) ? "s:" : ":"); for (i = 0; i < ntevs; i++) { tev = &tevs[i]; if (pev->event) @@ -1784,7 +1784,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, if (ret >= 0) { /* Show how to use the event. 
*/ - printf("\nYou can now use it on all perf tools, such as:\n\n"); + printf("\nYou can now use it in all perf tools, such as:\n\n"); printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group, tev->event); } @@ -1959,7 +1959,7 @@ static int __del_trace_probe_event(int fd, struct str_node *ent) goto error; } - printf("Remove event: %s\n", ent->s); + printf("Removed event: %s\n", ent->s); return 0; error: pr_warning("Failed to delete event: %s\n", strerror(-ret)); -- cgit v0.10.2 From f8f4b2872295dca88339ec0c403b2217b1197353 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 20 Jan 2012 10:49:12 +0100 Subject: perf tools: Fix strlen() bug in perf_event__synthesize_event_type() The event_type record has a max length for the event name. It's called MAX_EVENT_NAME. The name may be truncated to fit the max length. But the header.size still reflects the original name length. If that length is > MAX_EVENT_NAME, then the header.size field is bogus. Fix this by using the length of the name after the potential truncation. Cc: David Ahern Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20120120094912.GA4882@quad Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 3e7e0b0..ecd7f4d 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2105,7 +2105,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool, strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; - size = strlen(name); + size = strlen(ev.event_type.event_type.name); size = ALIGN(size, sizeof(u64)); ev.event_type.header.size = sizeof(ev.event_type) - (sizeof(ev.event_type.event_type.name) - size); -- cgit v0.10.2 From efb3040d481a1594592b1defb4526c406c7a4751 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 26 Jan 2012 13:32:15 +0100 Subject: jump_label: Add some documentation akpm figured we could do with a blurb explaining what static_branch() is and why it lives... Grumpily-requested-by: Andrew Morton Cc: Jason Baron Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-h02wu6kabpoojxf03wke704k@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 5ce8b14..f7c6958 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -1,6 +1,38 @@ #ifndef _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H +/* + * Jump label support + * + * Copyright (C) 2009-2012 Jason Baron + * Copyright (C) 2011-2012 Peter Zijlstra + * + * Jump labels provide an interface to generate dynamic branches using + * self-modifying code. Assuming toolchain and architecture support, the result + * of an "if (static_branch(&key))" statement is an unconditional branch (which + * defaults to false - and the true block is placed out of line). + * + * However at runtime we can change the 'static' branch target using + * jump_label_{inc,dec}(). These function as a 'reference' count on the key + * object and for as long as there are references all branches referring to + * that particular key will point to the (out of line) true block. + * + * Since this relies on modifying code the jump_label_{inc,dec}() functions + * must be considered absolute slow paths (machine wide synchronization etc.). + * OTOH, since the affected branches are unconditional their runtime overhead + * will be absolutely minimal, esp.
in the default (off) case where the total + * effect is a single NOP of appropriate size. The on case will patch in a jump + * to the out-of-line block. + * + * When the control is directly exposed to userspace it is prudent to delay the + * decrement to avoid high frequency code modifications which can (and do) + * cause significant performance degradation. Struct jump_label_key_deferred and + * jump_label_dec_deferred() provide for this. + * + * Lacking toolchain and/or architecture support, it falls back to a simple + * conditional branch. + */ + #include <linux/types.h> #include <linux/compiler.h> #include <linux/workqueue.h> -- cgit v0.10.2 From f9d36996564f91c517b75b02942015f7e09a6574 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Wed, 25 Jan 2012 15:20:40 +0100 Subject: perf evsel: Fix spelling typo Correct spelling "unsuported" to "unsupported" in tools/perf/util/evsel.c Cc: Peter Zijlstra Cc: standby24x7@gmail.com Cc: trivial@kernel.org Link: http://lkml.kernel.org/r/1327500312-9520-1-git-send-email-standby24x7@gmail.com Signed-off-by: Masanari Iida Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 667f3b7..dcfefab 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -535,7 +535,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, } if (type & PERF_SAMPLE_READ) { - fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n"); + fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n"); return -1; } -- cgit v0.10.2 From 547a92e0aedb88129e7fbd804697a11949de2e5a Mon Sep 17 00:00:00 2001 From: Akihiro Nagai Date: Mon, 30 Jan 2012 13:42:57 +0900 Subject: perf script: Unify the expressions indicating "unknown" The perf script command uses various expressions to indicate "unknown". That is unfriendly for user scripts to parse. So, this patch unifies the expressions to "[unknown]". Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20120130044257.2384.62905.stgit@linux3 Signed-off-by: Akihiro Nagai Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index bb68ddf..add13ec 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -300,7 +300,7 @@ static void print_sample_start(struct perf_sample *sample, } else evname = __event_name(attr->type, attr->config); - printf("%s: ", evname ? evname : "(unknown)"); + printf("%s: ", evname ?
evname : "[unknown]"); } } @@ -323,7 +323,6 @@ static void print_sample_addr(union perf_event *event, { struct addr_location al; u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - const char *symname, *dsoname; printf("%16" PRIx64, sample->addr); @@ -343,21 +342,14 @@ static void print_sample_addr(union perf_event *event, al.sym = map__find_symbol(al.map, al.addr, NULL); if (PRINT_FIELD(SYM)) { - if (al.sym && al.sym->name) - symname = al.sym->name; - else - symname = ""; - - printf(" %16s", symname); + printf(" "); + symbol__fprintf_symname(al.sym, stdout); } if (PRINT_FIELD(DSO)) { - if (al.map && al.map->dso && al.map->dso->name) - dsoname = al.map->dso->name; - else - dsoname = ""; - - printf(" (%s)", dsoname); + printf(" ("); + map__fprintf_dsoname(al.map, stdout); + printf(")"); } } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 316aa0a..1107960 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -212,6 +212,18 @@ size_t map__fprintf(struct map *self, FILE *fp) self->start, self->end, self->pgoff, self->dso->name); } +size_t map__fprintf_dsoname(struct map *map, FILE *fp) +{ + const char *dsoname; + + if (map && map->dso && map->dso->name) + dsoname = map->dso->name; + else + dsoname = "[unknown]"; + + return fprintf(fp, "%s", dsoname); +} + /* * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. * map->dso->adjust_symbols==1 for ET_EXEC-like cases. diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 2b8017f..b100c20 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -118,6 +118,7 @@ void map__delete(struct map *self); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); +size_t map__fprintf_dsoname(struct map *map, FILE *fp); int map__load(struct map *self, symbol_filter_t filter); struct symbol *map__find_symbol(struct map *self, diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index b5ca255..e5334a9 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1296,7 +1296,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, int print_sym, int print_dso) { struct addr_location al; - const char *symname, *dsoname; struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; struct callchain_cursor_node *node; @@ -1324,20 +1323,13 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, printf("\t%16" PRIx64, node->ip); if (print_sym) { - if (node->sym && node->sym->name) - symname = node->sym->name; - else - symname = ""; - - printf(" %s", symname); + printf(" "); + symbol__fprintf_symname(node->sym, stdout); } if (print_dso) { - if (node->map && node->map->dso && node->map->dso->name) - dsoname = node->map->dso->name; - else - dsoname = ""; - - printf(" (%s)", dsoname); + printf(" ("); + map__fprintf_dsoname(al.map, stdout); + printf(")"); } printf("\n"); @@ -1347,21 +1339,14 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, } else { printf("%16" PRIx64, sample->ip); if (print_sym) { - if (al.sym && al.sym->name) - symname = al.sym->name; - else - symname = ""; - - printf(" %s", symname); + printf(" "); + symbol__fprintf_symname(al.sym, stdout); } if (print_dso) { - if (al.map && al.map->dso && al.map->dso->name) - dsoname = al.map->dso->name; - else - dsoname = ""; - - printf(" (%s)", dsoname); + printf(" ("); + map__fprintf_dsoname(al.map, stdout); + printf(")"); } } } diff --git 
a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 0975438..b580fa8 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -263,6 +263,18 @@ static size_t symbol__fprintf(struct symbol *sym, FILE *fp) sym->name); } +size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) +{ + const char *symname; + + if (sym && sym->name) + symname = sym->name; + else + symname = "[unknown]"; + + return fprintf(fp, "%s", symname); +} + void dso__set_long_name(struct dso *dso, char *name) { if (name == NULL) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 123c2e1..d349c7a 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -241,6 +241,7 @@ void machines__destroy_guest_kernel_maps(struct rb_root *machines); int symbol__init(void); void symbol__exit(void); +size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); bool symbol_type__is_a(char symbol_type, enum map_type map_type); size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); -- cgit v0.10.2 From 9558259697b827106b464648e850e568e0b0c931 Mon Sep 17 00:00:00 2001 From: Akihiro Nagai Date: Mon, 30 Jan 2012 13:43:09 +0900 Subject: perf script: Print branch_from and branch_to of BTS events BTS records branch_from_addr and branch_to_addr in IP and ADDR field in perf_sample. This patch enables to print them in following format, () => () Sample: ffffffff814675d2 irq_return ([kernel.kallsyms]) => 3f03e016b0 _start (/lib64/ld-2.14.so) ffffffff814675d2 irq_return ([kernel.kallsyms]) => 3f03e016b0 _start (/lib64/ld-2.14.so) 3f03e016b3 _start (/lib64/ld-2.14.so) => 3f03e04b80 _dl_start (/lib64/ld-2.14.so) ffffffff814675d2 irq_return ([kernel.kallsyms]) => 3f03e04b80 _dl_start (/lib64/ld-2.14.so) ffffffff814675d2 irq_return ([kernel.kallsyms]) => 3f03e04ba6 _dl_start (/lib64/ld-2.14.so) ffffffff814675d2 irq_return ([kernel.kallsyms]) => 3f03e04bad _dl_start (/lib64/ld-2.14.so) 3f03e04bfb _dl_start (/lib64/ld-2.14.so) => 3f03e04c1d _dl_start (/lib64/ld-2.14.so) [snip] Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20120130044309.2384.44252.stgit@linux3 Signed-off-by: Akihiro Nagai Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index add13ec..414d49a 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -304,6 +304,13 @@ static void print_sample_start(struct perf_sample *sample, } } +static bool is_bts_event(struct perf_event_attr *attr) +{ + return ((attr->type == PERF_TYPE_HARDWARE) && + (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && + (attr->sample_period == 1)); +} + static bool sample_addr_correlates_sym(struct perf_event_attr *attr) { if ((attr->type == PERF_TYPE_SOFTWARE) && @@ -312,6 +319,9 @@ static bool sample_addr_correlates_sym(struct perf_event_attr *attr) (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))) return true; + if (is_bts_event(attr)) + return true; + return false; } @@ -353,6 +363,33 @@ static void print_sample_addr(union perf_event *event, } } +static void print_sample_bts(union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine, + struct thread *thread) +{ + struct perf_event_attr *attr = &evsel->attr; + + /* print branch_from information */ + if (PRINT_FIELD(IP)) { + if (!symbol_conf.use_callchain) + printf(" "); + else + printf("\n"); + 
perf_event__print_ip(event, sample, machine, evsel, + PRINT_FIELD(SYM), PRINT_FIELD(DSO)); + } + + printf(" => "); + + /* print branch_to information */ + if (PRINT_FIELD(ADDR)) + print_sample_addr(event, sample, machine, thread, attr); + + printf("\n"); +} + static void process_event(union perf_event *event __unused, struct perf_sample *sample, struct perf_evsel *evsel, @@ -366,6 +403,11 @@ static void process_event(union perf_event *event __unused, print_sample_start(sample, thread, attr); + if (is_bts_event(attr)) { + print_sample_bts(event, sample, evsel, machine, thread); + return; + } + if (PRINT_FIELD(TRACE)) print_trace_event(sample->cpu, sample->raw_data, sample->raw_size); -- cgit v0.10.2 From a978f2ab4166a84c77d0f846f59690f2a892d058 Mon Sep 17 00:00:00 2001 From: Akihiro Nagai Date: Mon, 30 Jan 2012 13:43:15 +0900 Subject: perf script: Add the offset field specifier Add the offset field specifier 'symoff' to show the offset from the symbols in the output of perf-script. We can get the more detailed address information. Output sample: ffffffff81467612 irq_return+0x0 => 301ec016b0 _start+0x0 ffffffff81467612 irq_return+0x0 => 301ec016b0 _start+0x0 301ec016b3 _start+0x3 => 301ec04b70 _dl_start+0x0 ffffffff81467612 irq_return+0x0 => 301ec04b70 _dl_start+0x0 ffffffff81467612 irq_return+0x0 => 301ec04b96 _dl_start+0x26 ffffffff81467612 irq_return+0x0 => 301ec04b9d _dl_start+0x2d 301ec04beb _dl_start+0x7b => 301ec04c0d _dl_start+0x9d 301ec04c11 _dl_start+0xa1 => 301ec04bf0 _dl_start+0x80 [snip] Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20120130044314.2384.67094.stgit@linux3 Signed-off-by: Akihiro Nagai Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 2f6cef4..228c7bb 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -115,7 +115,7 @@ OPTIONS -f:: --fields:: Comma separated list of fields to print. Options are: - comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr. + comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff. Field list can be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 414d49a..752d401 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -40,6 +40,7 @@ enum perf_output_field { PERF_OUTPUT_SYM = 1U << 8, PERF_OUTPUT_DSO = 1U << 9, PERF_OUTPUT_ADDR = 1U << 10, + PERF_OUTPUT_SYMOFFSET = 1U << 11, }; struct output_option { @@ -57,6 +58,7 @@ struct output_option { {.str = "sym", .field = PERF_OUTPUT_SYM}, {.str = "dso", .field = PERF_OUTPUT_DSO}, {.str = "addr", .field = PERF_OUTPUT_ADDR}, + {.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET}, }; /* default set to maintain compatibility with current format */ @@ -193,6 +195,11 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, "to symbols.\n"); return -EINVAL; } + if (PRINT_FIELD(SYMOFFSET) && !PRINT_FIELD(SYM)) { + pr_err("Display of offsets requested but symbol is not" + "selected.\n"); + return -EINVAL; + } if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) { pr_err("Display of DSO requested but neither sample IP nor " "sample address\nis selected. 
Hence, no addresses to convert " @@ -353,7 +360,10 @@ static void print_sample_addr(union perf_event *event, if (PRINT_FIELD(SYM)) { printf(" "); - symbol__fprintf_symname(al.sym, stdout); + if (PRINT_FIELD(SYMOFFSET)) + symbol__fprintf_symname_offs(al.sym, &al, stdout); + else + symbol__fprintf_symname(al.sym, stdout); } if (PRINT_FIELD(DSO)) { @@ -378,7 +388,8 @@ static void print_sample_bts(union perf_event *event, else printf("\n"); perf_event__print_ip(event, sample, machine, evsel, - PRINT_FIELD(SYM), PRINT_FIELD(DSO)); + PRINT_FIELD(SYM), PRINT_FIELD(DSO), + PRINT_FIELD(SYMOFFSET)); } printf(" => "); @@ -421,7 +432,8 @@ static void process_event(union perf_event *event __unused, else printf("\n"); perf_event__print_ip(event, sample, machine, evsel, - PRINT_FIELD(SYM), PRINT_FIELD(DSO)); + PRINT_FIELD(SYM), PRINT_FIELD(DSO), + PRINT_FIELD(SYMOFFSET)); } printf("\n"); @@ -1131,7 +1143,10 @@ static const struct option options[] = { OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_CALLBACK('f', "fields", NULL, "str", - "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr", + "comma separated output fields prepend with 'type:'. " + "Valid types: hw,sw,trace,raw. " + "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," + "addr,symoff", parse_output_fields), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e5334a9..552c1c5 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1293,7 +1293,7 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, struct machine *machine, struct perf_evsel *evsel, - int print_sym, int print_dso) + int print_sym, int print_dso, int print_symoffset) { struct addr_location al; struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; @@ -1340,7 +1340,11 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, printf("%16" PRIx64, sample->ip); if (print_sym) { printf(" "); - symbol__fprintf_symname(al.sym, stdout); + if (print_symoffset) + symbol__fprintf_symname_offs(al.sym, &al, + stdout); + else + symbol__fprintf_symname(al.sym, stdout); } if (print_dso) { diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 37bc383..c8d9017 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -147,7 +147,7 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, struct machine *machine, struct perf_evsel *evsel, - int print_sym, int print_dso); + int print_sym, int print_dso, int print_symoffset); int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b580fa8..fc6e12f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -263,16 +263,26 @@ static size_t symbol__fprintf(struct symbol *sym, FILE *fp) sym->name); } -size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) +size_t symbol__fprintf_symname_offs(const struct symbol *sym, + const struct addr_location *al, FILE *fp) { - const char *symname; + unsigned long offset; + size_t length; - if (sym && 
sym->name) - symname = sym->name; - else - symname = "[unknown]"; + if (sym && sym->name) { + length = fprintf(fp, "%s", sym->name); + if (al) { + offset = al->addr - sym->start; + length += fprintf(fp, "+0x%lx", offset); + } + return length; + } else + return fprintf(fp, "[unknown]"); +} - return fprintf(fp, "%s", symname); +size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) +{ + return symbol__fprintf_symname_offs(sym, NULL, fp); } void dso__set_long_name(struct dso *dso, char *name) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index d349c7a..ba909c9 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -241,6 +241,8 @@ void machines__destroy_guest_kernel_maps(struct rb_root *machines); int symbol__init(void); void symbol__exit(void); +size_t symbol__fprintf_symname_offs(const struct symbol *sym, + const struct addr_location *al, FILE *fp); size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); bool symbol_type__is_a(char symbol_type, enum map_type map_type); -- cgit v0.10.2 From 0bc8d20580af74c9a8a39c200e269261e5cded05 Mon Sep 17 00:00:00 2001 From: Akihiro Nagai Date: Mon, 30 Jan 2012 13:43:20 +0900 Subject: perf script: Add option resolving vmlinux path Add an option to get the path of [kernel.kallsyms]. Specify the '--show-kernel-path' option to use this function. This patch enables other applications to use this output easily. Without --show-kernel-path option ffffffff81467612 irq_return ([kernel.kallsyms]) ffffffff81467612 irq_return ([kernel.kallsyms]) 7f24fc02a6b3 _start (/lib64/ld-2.14.so) [snip] With --show-kernel-path option ffffffff81467612 irq_return (/lib/modules/3.2.0+/build/vmlinux) ffffffff81467612 irq_return (/lib/modules/3.2.0+/build/vmlinux) 7f24fc02a6b3 _start (/lib64/ld-2.14.so) [snip] Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20120130044320.2384.73322.stgit@linux3 Signed-off-by: Akihiro Nagai Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 228c7bb..e9cbfcd 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -200,6 +200,9 @@ OPTIONS It currently includes: cpu and numa topology of the host system. It can only be used with the perf script report mode.
+--show-kernel-path:: + Try to resolve the path of [kernel.kallsyms] + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-script-perl[1], diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 752d401..d4ce733 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1155,6 +1155,9 @@ static const struct option options[] = { "only display events for these comms"), OPT_BOOLEAN('I', "show-info", &show_full_info, "display extended information from perf.data file"), + OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path, + "Show the path of [kernel.kallsyms]"), + OPT_END() }; diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 1107960..dea6d1c 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -216,9 +216,12 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp) { const char *dsoname; - if (map && map->dso && map->dso->name) - dsoname = map->dso->name; - else + if (map && map->dso && (map->dso->name || map->dso->long_name)) { + if (symbol_conf.show_kernel_path && map->dso->long_name) + dsoname = map->dso->long_name; + else if (map->dso->name) + dsoname = map->dso->name; + } else dsoname = "[unknown]"; return fprintf(fp, "%s", dsoname); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ba909c9..2a683d4 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -70,6 +70,7 @@ struct symbol_conf { unsigned short priv_size; unsigned short nr_events; bool try_vmlinux_path, + show_kernel_path, use_modules, sort_by_name, show_nr_samples, -- cgit v0.10.2 From 008953dc6a3164b7ec2be2684a8fad1e636d5648 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 30 Jan 2012 11:50:37 +0100 Subject: perf top: Fix number of samples displayed In recent versions of perf top, pressing the 'e' key to change the number of displayed samples had no effect. The number of samples was still dictated by the size of the terminal (stdio mode). That was quite annoying because typically only the first dozen samples really matter. This patch fixes this. 
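For reference, a minimal standalone sketch (assuming stdin is a tty; not the perf source itself) of the terminal-size query that get_term_dimensions() and the SIGWINCH handling in the fix below are built around:

/* winsz.c */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) < 0) {
		perror("TIOCGWINSZ");
		return 1;
	}
	/* ws_row is what bounds print_entries in the patch below */
	printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
	return 0;
}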
Cc: David Ahern Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20120130105037.GA5160@quad Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index e8b033c..d869b21 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -88,8 +88,6 @@ void get_term_dimensions(struct winsize *ws) static void perf_top__update_print_entries(struct perf_top *top) { - top->print_entries = top->winsize.ws_row; - if (top->print_entries > 9) top->print_entries -= 9; } @@ -99,6 +97,13 @@ static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *ar struct perf_top *top = arg; get_term_dimensions(&top->winsize); + if (!top->print_entries + || (top->print_entries+4) > top->winsize.ws_row) { + top->print_entries = top->winsize.ws_row; + } else { + top->print_entries += 4; + top->winsize.ws_row = top->print_entries; + } perf_top__update_print_entries(top); } @@ -452,8 +457,10 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) }; perf_top__sig_winch(SIGWINCH, NULL, top); sigaction(SIGWINCH, &act, NULL); - } else + } else { + perf_top__sig_winch(SIGWINCH, NULL, top); signal(SIGWINCH, SIG_DFL); + } break; case 'E': if (top->evlist->nr_entries > 1) { -- cgit v0.10.2 From d1eec3ecaef083affaf3210246b01b6e80d3a44e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Sun, 29 Jan 2012 17:55:56 +0900 Subject: perf lock: Document lock info subcommand The commit 26242d859c9be ("perf lock: Add "info" subcommand for dumping misc information") added the subcommand but missed documentation. Add it. Also update stale 'trace' subcommand to 'script'. Cc: Hitoshi Mitake Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1327827356-8786-5-git-send-email-namhyung@gmail.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt index d6b2a4f..c7f5f55 100644 --- a/tools/perf/Documentation/perf-lock.txt +++ b/tools/perf/Documentation/perf-lock.txt @@ -8,7 +8,7 @@ perf-lock - Analyze lock events SYNOPSIS -------- [verse] -'perf lock' {record|report|trace} +'perf lock' {record|report|script|info} DESCRIPTION ----------- @@ -20,10 +20,13 @@ and statistics with this 'perf lock' command. produces the file "perf.data" which contains tracing results of lock events. - 'perf lock trace' shows raw lock events. - 'perf lock report' reports statistical data. + 'perf lock script' shows raw lock events. + + 'perf lock info' shows metadata like threads or addresses + of lock instances. + COMMON OPTIONS -------------- @@ -47,6 +50,17 @@ REPORT OPTIONS Sorting key. Possible values: acquired (default), contended, wait_total, wait_max, wait_min. 
+INFO OPTIONS +------------ + +-t:: +--threads:: + dump thread list in perf.data + +-m:: +--map:: + dump map of lock instances (address:name table) + SEE ALSO -------- linkperf:perf[1] diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 2296c39..12c8148 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -922,12 +922,12 @@ static const struct option info_options[] = { OPT_BOOLEAN('t', "threads", &info_threads, "dump thread list in perf.data"), OPT_BOOLEAN('m', "map", &info_map, - "map of lock instances (name:address table)"), + "map of lock instances (address:name table)"), OPT_END() }; static const char * const lock_usage[] = { - "perf lock [<options>] {record|trace|report}", + "perf lock [<options>] {record|report|script|info}", NULL }; -- cgit v0.10.2 From d30d4a080d195892091ad7d014fc9293cc08ea0f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Sun, 29 Jan 2012 17:55:55 +0900 Subject: perf tools: Remove unnecessary ctype.h inclusion There are unnecessary #include <ctype.h> lines out there, and they might cause a nasty build failure in some environment. As we already have most of the ctype macros in util.h, just get rid of them. A few exceptions are util/symbol.c, which needs the isupper() macro util.h doesn't provide, and the perl scripting support code, which includes ctype.h internally. Suggested-by: Ingo Molnar Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1327827356-8786-4-git-send-email-namhyung@gmail.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 6ad2b1c..7155722 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -5,7 +5,6 @@ * * Written by Hitoshi Mitake */ -#include <ctype.h> #include "../perf.h" #include "../util/util.h" diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c index 59d4933..e907918 100644 --- a/tools/perf/bench/mem-memset.c +++ b/tools/perf/bench/mem-memset.c @@ -5,7 +5,6 @@ * * Trivial clone of mem-memcpy.c.
*/ -#include <ctype.h> #include "../perf.h" #include "../util/util.h" diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 5d73262..67dc4ae 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -30,7 +30,6 @@ #include #include #include -#include <ctype.h> #include #include diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 0b2a487..c2623c6 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -24,7 +24,6 @@ #include #include #include -#include <ctype.h> #include #include "../../perf.h" diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 1a8d4dc..e0a4f65 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -25,7 +25,6 @@ #include #include #include -#include <ctype.h> #include #include "../perf.h" diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index f55cc3a..b9592e0 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -33,7 +33,6 @@ #include #include #include -#include <ctype.h> #include #include "../perf.h" diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index a3fdf55..18ae6c1 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -22,7 +22,6 @@ #include #include #include -#include <ctype.h> #include #include "../perf.h" diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c index 6905bcc..eca6575 100644 --- a/tools/perf/util/ui/browsers/map.c +++ b/tools/perf/util/ui/browsers/map.c @@ -3,9 +3,9 @@ #include #include #include -#include <ctype.h> #include #include +#include "../../util.h" #include "../../debug.h" #include "../../symbol.h" #include "../browser.h" -- cgit v0.10.2 From cfeb1d90a1b1db96383b48888cb7a5f10ca12e12 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Jan 2012 14:43:21 -0200 Subject: perf python: Use attr.watermark in twatch.py We want to be woken up for every PERF_RECORD_ event, attr.wakeup_events is only for PERF_RECORD_SAMPLE, so also use attr.watermark = 1 to fix that. Suggested-by: Peter Zijlstra Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-v3lnpwgrr8mllcr3ntduuqvc@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py index df638c4..b11cca5 100755 --- a/tools/perf/python/twatch.py +++ b/tools/perf/python/twatch.py @@ -19,7 +19,7 @@ def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, - wakeup_events = 1, sample_period = 1, + wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); -- cgit v0.10.2 From 0529bc1fe1e3e509a2fc06fe887b0c76b1eb01f9 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 27 Jan 2012 15:34:20 +0100 Subject: perf evlist: Make splice_list_tail method public Making perf_evlist__splice_list_tail globally accessible. It is used in the upcoming patches.
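To illustrate the semantics being exported, here is a tiny self-contained model of what perf_evlist__splice_list_tail() does (a singly-linked sketch with made-up types, not the kernel's list.h): the whole temporary list is moved onto the tail of the evlist and the entry count is bumped in one step.

/* splice.c - build with -std=gnu99 (as the perf Makefile above does) */
#include <stdio.h>

struct node { struct node *next; const char *name; };
struct evlist { struct node *head, *tail; int nr_entries; };

static void splice_list_tail(struct evlist *evlist,
			     struct node *list, int nr_entries)
{
	if (!list)
		return;
	if (evlist->tail)
		evlist->tail->next = list;
	else
		evlist->head = list;
	while (list->next)		/* walk to the new tail */
		list = list->next;
	evlist->tail = list;
	evlist->nr_entries += nr_entries;
}

int main(void)
{
	struct node miss = { NULL, "cache-misses" };
	struct node cyc = { &miss, "cycles" };
	struct evlist el = { NULL, NULL, 0 };

	splice_list_tail(&el, &cyc, 2);
	for (struct node *n = el.head; n; n = n->next)
		printf("%s\n", n->name);
	printf("nr_entries = %d\n", el.nr_entries);
	return 0;
}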
Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford Link: http://lkml.kernel.org/r/1327674868-10486-2-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index a6d50e3..a57a8cf 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -97,9 +97,9 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) ++evlist->nr_entries; } -static void perf_evlist__splice_list_tail(struct perf_evlist *evlist, - struct list_head *list, - int nr_entries) +void perf_evlist__splice_list_tail(struct perf_evlist *evlist, + struct list_head *list, + int nr_entries) { list_splice_tail(list, &evlist->entries); evlist->nr_entries += nr_entries; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 9c51660..1b4282b 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -117,4 +117,9 @@ u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); + +void perf_evlist__splice_list_tail(struct perf_evlist *evlist, + struct list_head *list, + int nr_entries); + #endif /* __PERF_EVLIST_H */ -- cgit v0.10.2 From 946d863cc0d2262080c8101c8e6299974e449fe3 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Wed, 1 Feb 2012 23:16:31 +0100 Subject: perf tools: Remove distclean from Makefile help output distclean as an alias for clean was removed from the perf Makefile by commit a3d1ee10d1bf4520af3d44c1aa6cd46956ec4fd7 However, that commit neglected to remove it from the help output of the perf Makefile, which could result in a user trying the following. $ cd tools/perf/ $ make help | grep distclean distclean - alias to clean $ make distclean make: *** No rule to make target `distclean'. Stop. This patch removes it from the Makefile help output. Link: http://lkml.kernel.org/r/1328134591-19851-1-git-send-email-jkacur@redhat.com Signed-off-by: John Kacur Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d64f581..64df5de 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -794,7 +794,6 @@ help: @echo ' quick-install-html - install the html documentation quickly' @echo '' @echo 'Perf maintainer targets:' - @echo ' distclean - alias to clean' @echo ' clean - clean all binary objects and build output' doc: -- cgit v0.10.2 From 4eced2347c447c9409877368fc52478c356b4767 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Thu, 2 Feb 2012 19:50:40 +0530 Subject: perf probe: Rename target_module to target This is a precursor patch that modifies names that refer to kernel/module to also refer to user space names. 
Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Andrew Morton Cc: Anton Arapov Cc: Christoph Hellwig Cc: Ingo Molnar Cc: Jim Keniston Cc: Linus Torvalds Cc: Linux-mm Cc: Masami Hiramatsu Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Roland McGrath Cc: Stephen Rothwell Cc: Steven Rostedt Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20120202142040.5967.64156.sendpatchset@srdronam.in.ibm.com Signed-off-by: Srikar Dronamraju Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index fb85661..4935c09 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -58,7 +58,7 @@ static struct { struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; struct line_range line_range; - const char *target_module; + const char *target; int max_probe_points; struct strfilter *filter; } params; @@ -246,7 +246,7 @@ static const struct option options[] = { "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, "directory", "path to kernel source"), - OPT_STRING('m', "module", ¶ms.target_module, + OPT_STRING('m', "module", ¶ms.target, "modname|path", "target module name (for online) or path (for offline)"), #endif @@ -333,7 +333,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (!params.filter) params.filter = strfilter__new(DEFAULT_FUNC_FILTER, NULL); - ret = show_available_funcs(params.target_module, + ret = show_available_funcs(params.target, params.filter); strfilter__delete(params.filter); if (ret < 0) @@ -354,7 +354,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) usage_with_options(probe_usage, options); } - ret = show_line_range(¶ms.line_range, params.target_module); + ret = show_line_range(¶ms.line_range, params.target); if (ret < 0) pr_err(" Error: Failed to show lines. (%d)\n", ret); return ret; @@ -371,7 +371,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) ret = show_available_vars(params.events, params.nevents, params.max_probe_points, - params.target_module, + params.target, params.filter, params.show_ext_vars); strfilter__delete(params.filter); @@ -393,7 +393,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (params.nevents) { ret = add_perf_probe_events(params.events, params.nevents, params.max_probe_points, - params.target_module, + params.target, params.force_add); if (ret < 0) { pr_err(" Error: Failed to add events. (%d)\n", ret); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index b9bbdd2..c1a513e 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -273,10 +273,10 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, /* Try to find perf_probe_event with debuginfo */ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs, - int max_tevs, const char *module) + int max_tevs, const char *target) { bool need_dwarf = perf_probe_event_need_dwarf(pev); - struct debuginfo *dinfo = open_debuginfo(module); + struct debuginfo *dinfo = open_debuginfo(target); int ntevs, ret = 0; if (!dinfo) { @@ -295,9 +295,9 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, if (ntevs > 0) { /* Succeeded to find trace events */ pr_debug("find %d probe_trace_events.\n", ntevs); - if (module) + if (target) ret = add_module_to_probe_trace_events(*tevs, ntevs, - module); + target); return ret < 0 ? 
ret : ntevs; } @@ -1796,14 +1796,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, static int convert_to_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs, - int max_tevs, const char *module) + int max_tevs, const char *target) { struct symbol *sym; int ret = 0, i; struct probe_trace_event *tev; /* Convert perf_probe_event with debuginfo */ - ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module); + ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target); if (ret != 0) return ret; /* Found in debuginfo or got an error */ @@ -1819,8 +1819,8 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, goto error; } - if (module) { - tev->point.module = strdup(module); + if (target) { + tev->point.module = strdup(target); if (tev->point.module == NULL) { ret = -ENOMEM; goto error; @@ -1884,7 +1884,7 @@ struct __event_package { }; int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, - int max_tevs, const char *module, bool force_add) + int max_tevs, const char *target, bool force_add) { int i, j, ret; struct __event_package *pkgs; @@ -1907,7 +1907,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, ret = convert_to_probe_trace_events(pkgs[i].pev, &pkgs[i].tevs, max_tevs, - module); + target); if (ret < 0) goto end; pkgs[i].ntevs = ret; @@ -2063,7 +2063,7 @@ static int filter_available_functions(struct map *map __unused, return 1; } -int show_available_funcs(const char *module, struct strfilter *_filter) +int show_available_funcs(const char *target, struct strfilter *_filter) { struct map *map; int ret; @@ -2074,9 +2074,9 @@ int show_available_funcs(const char *module, struct strfilter *_filter) if (ret < 0) return ret; - map = kernel_get_module_map(module); + map = kernel_get_module_map(target); if (!map) { - pr_err("Failed to find %s map.\n", (module) ? : "kernel"); + pr_err("Failed to find %s map.\n", (target) ? : "kernel"); return -EINVAL; } available_func_filter = _filter; -- cgit v0.10.2 From 781ba9d2ed9df07dbb413fb5ee80ef7d353841c9 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 15 Dec 2011 17:32:40 +0100 Subject: perf record: Make feature initialization generic Loop over all features to enable them instead of explicitly enabling every single feature. This reduces duplicate code and makes it more robust to later changes, e.g. when adding more features.
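The idiom is worth spelling out: enable every feature bit by default, then clear the few exceptions, so that newly added features are picked up without touching this code again. A minimal stand-alone sketch of the same pattern in plain user-space C (the FEAT_* names and the bool array are made up for illustration; only the first/last sentinel trick mirrors the patch):

#include <stdbool.h>
#include <stdio.h>

enum { FEAT_FIRST = 1, FEAT_TRACE_INFO = 1, FEAT_BUILD_ID, FEAT_HOSTNAME, FEAT_LAST };

static bool feat[FEAT_LAST];

int main(void)
{
	int f;

	/* Enable everything by default; a new feature needs no hunk here. */
	for (f = FEAT_FIRST; f < FEAT_LAST; f++)
		feat[f] = true;

	/* Then clear only the features that do not apply to this session. */
	feat[FEAT_BUILD_ID] = false;

	for (f = FEAT_FIRST; f < FEAT_LAST; f++)
		printf("feature %d: %s\n", f, feat[f] ? "on" : "off");
	return 0;
}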
Cc: Ingo Molnar Link: http://lkml.kernel.org/r/1323966762-8574-3-git-send-email-robert.richter@amd.com Signed-off-by: Robert Richter Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 32870ee..f8d9a54 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -386,7 +386,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) { struct stat st; int flags; - int err, output; + int err, output, feat; unsigned long waking = 0; const bool forks = argc > 0; struct machine *machine; @@ -453,8 +453,14 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) rec->session = session; - if (!rec->no_buildid) - perf_header__set_feat(&session->header, HEADER_BUILD_ID); + for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) + perf_header__set_feat(&session->header, feat); + + if (rec->no_buildid) + perf_header__clear_feat(&session->header, HEADER_BUILD_ID); + + if (!have_tracepoints(&evsel_list->entries)) + perf_header__clear_feat(&session->header, HEADER_TRACE_INFO); if (!rec->file_new) { err = perf_session__read_header(session, output); @@ -462,22 +468,6 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) goto out_delete_session; } - if (have_tracepoints(&evsel_list->entries)) - perf_header__set_feat(&session->header, HEADER_TRACE_INFO); - - perf_header__set_feat(&session->header, HEADER_HOSTNAME); - perf_header__set_feat(&session->header, HEADER_OSRELEASE); - perf_header__set_feat(&session->header, HEADER_ARCH); - perf_header__set_feat(&session->header, HEADER_CPUDESC); - perf_header__set_feat(&session->header, HEADER_NRCPUS); - perf_header__set_feat(&session->header, HEADER_EVENT_DESC); - perf_header__set_feat(&session->header, HEADER_CMDLINE); - perf_header__set_feat(&session->header, HEADER_VERSION); - perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); - perf_header__set_feat(&session->header, HEADER_TOTAL_MEM); - perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY); - perf_header__set_feat(&session->header, HEADER_CPUID); - if (forks) { err = perf_evlist__prepare_workload(evsel_list, opts, argv); if (err < 0) { diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index ac4ec95..e68f617 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -11,6 +11,7 @@ enum { HEADER_RESERVED = 0, /* always cleared */ + HEADER_FIRST_FEATURE = 1, HEADER_TRACE_INFO = 1, HEADER_BUILD_ID, -- cgit v0.10.2 From ac483c446b67870444c9eeaf8325d3d2af9b91bc Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 2 Jan 2012 10:04:14 +0100 Subject: ftrace: Change filter/notrace set functions to return exit code Currently the ftrace_set_filter and ftrace_set_notrace functions do not return any return code, so there's no way for an ftrace_ops user to tell whether the filter was correctly applied. The set_ftrace_filter interface returns an error in case the filter did not match: # echo krava > set_ftrace_filter bash: echo: write error: Invalid argument Change both ftrace_set_filter and ftrace_set_notrace to return zero if the filter was applied correctly, or -E* values in case of error.
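With a return code available, in-kernel users can reject a bad filter up front. A hedged sketch of how an ftrace_ops user might check the result (the callback and the "do_sys_open" filter string are hypothetical; the ftrace_set_filter() signature is the one introduced by this patch):

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static void my_callback(unsigned long ip, unsigned long parent_ip)
{
	/* called for every hit on the filtered functions */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_mod_init(void)
{
	char buf[] = "do_sys_open";
	int ret;

	/* Before this patch the call returned void, so a filter that
	 * matched nothing went unnoticed; now -EINVAL is reported. */
	ret = ftrace_set_filter(&my_ops, (unsigned char *)buf, strlen(buf), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
module_init(my_mod_init);

static void __exit my_mod_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");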
Link: http://lkml.kernel.org/r/1325495060-6402-2-git-send-email-jolsa@redhat.com Acked-by: Frederic Weisbecker Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 028e26f..f33fb3b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -178,9 +178,9 @@ struct dyn_ftrace { }; int ftrace_force_update(void); -void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, +int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); -void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, +int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); void ftrace_set_global_filter(unsigned char *buf, int len, int reset); void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 683d559..e2e0597 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3146,8 +3146,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, mutex_lock(&ftrace_regex_lock); if (reset) ftrace_filter_reset(hash); - if (buf) - ftrace_match_records(hash, buf, len); + if (buf && !ftrace_match_records(hash, buf, len)) { + ret = -EINVAL; + goto out_regex_unlock; + } mutex_lock(&ftrace_lock); ret = ftrace_hash_move(ops, enable, orig_hash, hash); @@ -3157,6 +3159,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, mutex_unlock(&ftrace_lock); + out_regex_unlock: mutex_unlock(&ftrace_regex_lock); free_ftrace_hash(hash); @@ -3173,10 +3176,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, * Filters denote which functions should be enabled when tracing is enabled. * If @buf is NULL and reset is set, all functions will be enabled for tracing. */ -void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, +int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset) { - ftrace_set_regex(ops, buf, len, reset, 1); + return ftrace_set_regex(ops, buf, len, reset, 1); } EXPORT_SYMBOL_GPL(ftrace_set_filter); @@ -3191,10 +3194,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter); * is enabled. If @buf is NULL and reset is set, all functions will be enabled * for tracing. */ -void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, +int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int len, int reset) { - ftrace_set_regex(ops, buf, len, reset, 0); + return ftrace_set_regex(ops, buf, len, reset, 0); } EXPORT_SYMBOL_GPL(ftrace_set_notrace); /** -- cgit v0.10.2 From f069686e4bdc60a637d210ea3eea25fcdb82df88 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 25 Jan 2012 20:18:55 -0500 Subject: tracing/softirq: Move __raise_softirq_irqoff() out of header The __raise_softirq_irqoff() function contains a tracepoint. As tracepoints in headers can cause issues, and bloat the kernel when they are in a static inline, it is best to move the function that contains the tracepoint out of the header and into softirq.c.
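The bloat is easy to picture: a static inline carrying a tracepoint is expanded, tracepoint site and all, into every translation unit that calls it, whereas an out-of-line definition leaves exactly one instance. A user-space sketch of the resulting shape (the names are illustrative and the tracepoint is stubbed with a printf):

/* example.h -- after the move: only a declaration remains here */
#ifndef EXAMPLE_H
#define EXAMPLE_H
void raise_example(unsigned int nr);
#endif

/* example.c -- the single definition; the trace call (stubbed below)
 * now exists in one object file instead of in every caller. */
#include <stdio.h>
#include "example.h"

static void trace_example_raise(unsigned int nr)	/* stand-in for the tracepoint */
{
	printf("example raised: %u\n", nr);
}

void raise_example(unsigned int nr)
{
	trace_example_raise(nr);
	/* ... set the pending bit, as or_softirq_pending() does ... */
}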
Link: http://lkml.kernel.org/r/20120118120711.GB14863@elte.hu Suggested-by: Ingo Molnar Signed-off-by: Steven Rostedt diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a64b00e..3f830e0 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -20,7 +20,6 @@ #include #include #include -#include /* * These correspond to the IORESOURCE_IRQ_* defines in @@ -456,11 +455,7 @@ asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); -static inline void __raise_softirq_irqoff(unsigned int nr) -{ - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); -} +extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index f7c543a..fc41824 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -16,6 +16,8 @@ #include #include +#include + #include "internals.h" /** diff --git a/kernel/softirq.c b/kernel/softirq.c index 4eb3a0f..06d4099 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -385,6 +385,12 @@ void raise_softirq(unsigned int nr) local_irq_restore(flags); } +void __raise_softirq_irqoff(unsigned int nr) +{ + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +} + void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; -- cgit v0.10.2 From e89cef136a8b56eef1acf702591fd3039fc3385d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 1 Feb 2012 13:54:08 +0100 Subject: perf tool: Fix perf stack to non executable on x86_64 By adding the following objects: bench/mem-memset-x86-64-asm.o bench/mem-memcpy-x86-64-asm.o the x86_64 perf binary ended up with an executable stack. The reason was that the above objects are assembler sourced and are missing the GNU-stack note section. In such a case the linker assumes that the final binary should not be restricted at all and marks the stack as RWX. Add a ".note.GNU-stack" section definition to the mentioned objects, with all flags disabled, thus omitting those objects from the linker's stack flags decision. Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=783570 Reported-by: Clark Williams Acked-by: Eric Dumazet Cc: Corey Ashford Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328100848-5630-1-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa [ committer note: Remaining bits after what was already added to perf/urgent ] Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/bench/mem-memset-x86-64-asm.S b/tools/perf/bench/mem-memset-x86-64-asm.S index cb92170..9e5af89 100644 --- a/tools/perf/bench/mem-memset-x86-64-asm.S +++ b/tools/perf/bench/mem-memset-x86-64-asm.S @@ -4,3 +4,10 @@ #define Lmemset_c globl memset_c; memset_c #define Lmemset_c_e globl memset_c_e; memset_c_e #include "../../../arch/x86/lib/memset_64.S" + +/* + * We need to provide note.GNU-stack section, saying that we want + * NOT executable stack. Otherwise the final linking will assume that + * the ELF stack should not be restricted at all and set it RWX.
+ */ +.section .note.GNU-stack,"",@progbits -- cgit v0.10.2 From 762b2935fc0ea2eab3a0edc8333fedb95266c7a9 Mon Sep 17 00:00:00 2001 From: Franck Bui-Huu Date: Mon, 6 Feb 2012 16:17:23 +0100 Subject: perf doc: Allow producing documentation in a specified output directory Currently we can put the object files in a different directory by using the 'O=' command line argument. However, the generated documentation files don't honor this directive. This patch fixes that. It's been tested for the man target, but the others seem currently broken, so no tests have been done on them so far. Link: http://lkml.kernel.org/r/1328541443-18003-1-git-send-email-fbuihuu@gmail.com Signed-off-by: Franck Bui-Huu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index 4626a39..ca600e0 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -1,3 +1,10 @@ +OUTPUT := ./ +ifeq ("$(origin O)", "command line") + ifneq ($(O),) + OUTPUT := $(O)/ + endif +endif + MAN1_TXT= \ $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \ $(wildcard perf-*.txt)) \ @@ -6,10 +13,11 @@ MAN5_TXT= MAN7_TXT= MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT) -MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT)) -MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT)) +_MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT)) +_MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT)) -DOC_HTML=$(MAN_HTML) +MAN_XML=$(addprefix $(OUTPUT),$(_MAN_XML)) +MAN_HTML=$(addprefix $(OUTPUT),$(_MAN_HTML)) ARTICLES = # with their own formatting rules. @@ -18,11 +26,17 @@ API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technica SP_ARTICLES += $(API_DOCS) SP_ARTICLES += technical/api-index -DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES)) +_DOC_HTML = $(_MAN_HTML) +_DOC_HTML+=$(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES)) +DOC_HTML=$(addprefix $(OUTPUT),$(_DOC_HTML)) -DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) -DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) -DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) +_DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) +_DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) +_DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) + +DOC_MAN1=$(addprefix $(OUTPUT),$(_DOC_MAN1)) +DOC_MAN5=$(addprefix $(OUTPUT),$(_DOC_MAN5)) +DOC_MAN7=$(addprefix $(OUTPUT),$(_DOC_MAN7)) # Make the path relative to DESTDIR, not prefix ifndef DESTDIR @@ -150,9 +164,9 @@ man1: $(DOC_MAN1) man5: $(DOC_MAN5) man7: $(DOC_MAN7) -info: perf.info perfman.info +info: $(OUTPUT)perf.info $(OUTPUT)perfman.info -pdf: user-manual.pdf +pdf: $(OUTPUT)user-manual.pdf install: install-man @@ -166,7 +180,7 @@ install-man: man install-info: info $(INSTALL) -d -m 755 $(DESTDIR)$(infodir) - $(INSTALL) -m 644 perf.info perfman.info $(DESTDIR)$(infodir) + $(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir) if test -r $(DESTDIR)$(infodir)/dir; then \ $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\ $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\ @@ -176,7 +190,7 @@ install-info: info install-pdf: pdf $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir) - $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir) + $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir) #install-html: html # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) @@ -189,14 +203,14 @@ install-pdf: pdf # # Determine "include::" file references in asciidoc files.
# -doc.dep : $(wildcard *.txt) build-docdep.perl +$(OUTPUT)doc.dep : $(wildcard *.txt) build-docdep.perl $(QUIET_GEN)$(RM) $@+ $@ && \ $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ mv $@+ $@ --include doc.dep +-include $(OUTPUT)doc.dep -cmds_txt = cmds-ancillaryinterrogators.txt \ +_cmds_txt = cmds-ancillaryinterrogators.txt \ cmds-ancillarymanipulators.txt \ cmds-mainporcelain.txt \ cmds-plumbinginterrogators.txt \ @@ -205,32 +219,36 @@ cmds_txt = cmds-ancillaryinterrogators.txt \ cmds-synchelpers.txt \ cmds-purehelpers.txt \ cmds-foreignscminterface.txt +cmds_txt=$(addprefix $(OUTPUT),$(_cmds_txt)) -$(cmds_txt): cmd-list.made +$(cmds_txt): $(OUTPUT)cmd-list.made -cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT) +$(OUTPUT)cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT) $(QUIET_GEN)$(RM) $@ && \ $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \ date >$@ clean: - $(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7 - $(RM) *.texi *.texi+ *.texi++ perf.info perfman.info - $(RM) howto-index.txt howto/*.html doc.dep - $(RM) technical/api-*.html technical/api-index.txt - $(RM) $(cmds_txt) *.made - -$(MAN_HTML): %.html : %.txt + $(RM) $(MAN_XML) $(addsuffix +,$(MAN_XML)) + $(RM) $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) + $(RM) $(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7) + $(RM) $(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++ + $(RM) $(OUTPUT)perf.info $(OUTPUT)perfman.info + $(RM) $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep + $(RM) $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt + $(RM) $(cmds_txt) $(OUTPUT)*.made + +$(MAN_HTML): $(OUTPUT)%.html : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ mv $@+ $@ -%.1 %.5 %.7 : %.xml +$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml $(QUIET_XMLTO)$(RM) $@ && \ - xmlto -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< + xmlto -o $(OUTPUT) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< -%.xml : %.txt +$(OUTPUT)%.xml : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ @@ -239,25 +257,25 @@ $(MAN_HTML): %.html : %.txt XSLT = docbook.xsl XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css -user-manual.html: user-manual.xml +$(OUTPUT)user-manual.html: $(OUTPUT)user-manual.xml $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $< -perf.info: user-manual.texi - $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi +$(OUTPUT)perf.info: $(OUTPUT)user-manual.texi + $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ $(OUTPUT)user-manual.texi -user-manual.texi: user-manual.xml +$(OUTPUT)user-manual.texi: $(OUTPUT)user-manual.xml $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ - $(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \ + $(DOCBOOK2X_TEXI) $(OUTPUT)user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \ $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \ rm $@++ && \ mv $@+ $@ -user-manual.pdf: user-manual.xml +$(OUTPUT)user-manual.pdf: $(OUTPUT)user-manual.xml $(QUIET_DBLATEX)$(RM) $@+ $@ && \ $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \ mv $@+ $@ -perfman.texi: $(MAN_XML) cat-texi.perl +$(OUTPUT)perfman.texi: $(MAN_XML) cat-texi.perl $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ ($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \ --to-stdout $(xml) &&) true) > $@++ && \ @@
-265,7 +283,7 @@ perfman.texi: $(MAN_XML) cat-texi.perl rm $@++ && \ mv $@+ $@ -perfman.info: perfman.texi +$(OUTPUT)perfman.info: $(OUTPUT)perfman.texi $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi $(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml -- cgit v0.10.2 From 5fde2523bddb71d96f12b6ee8d2a9a43cb99da96 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 6 Feb 2012 16:44:44 +0900 Subject: perf stat: Adjust print unit The default 'M/sec' unit is not useful if the result is too small. Adjust it dynamically according to the value. Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328514285-26232-1-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 459b862..32d930e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -844,12 +844,18 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(output, " # %8.3f GHz ", ratio); } else if (runtime_nsecs_stats[cpu].n != 0) { + char unit = 'M'; + total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) ratio = 1000.0 * avg / total; + if (ratio < 0.001) { + ratio *= 1000; + unit = 'K'; + } - fprintf(output, " # %8.3f M/sec ", ratio); + fprintf(output, " # %8.3f %c/sec ", ratio, unit); } else { fprintf(output, " "); } -- cgit v0.10.2 From 9dac6a29e0ce0cd9dec497baa123e216b00b525d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 6 Feb 2012 16:44:45 +0900 Subject: perf stat: Align scaled output of cpu-clock The output of the cpu-clock event is controlled in nsec_printout(), but its alignment was broken: Performance counter stats for 'sleep 1': 6,038,774 instructions # 0.00 insns per cycle 180 faults # 0.007 K/sec [99.95%] 1,282,201 branches # 0.053 M/sec [99.84%] 24126.221811 cpu-clock [99.62%] 24121.689540 task-clock # 24.098 CPUs utilized [99.52%] 1.001001017 seconds time elapsed This patch fixes this: Performance counter stats for 'sleep 1': 13,540,843 instructions # 0.00 insns per cycle 180 faults # 0.007 K/sec [99.94%] 2,875,386 branches # 0.119 M/sec [99.82%] 24144.221137 cpu-clock [99.61%] 24133.515366 task-clock # 24.109 CPUs utilized [99.52%] 1.001020946 seconds time elapsed Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328514285-26232-2-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 32d930e..d14b37a 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -576,6 +576,8 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) fprintf(output, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); + else + fprintf(output, " "); } /* used for get_ratio_color() */ -- cgit v0.10.2 From c98fdeaa92731308ed80386261fa2589addefa47 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 7 Feb 2012 13:08:52 +0100 Subject: x86/sched/perf/AMD: Set sched_clock_stable Stephane Eranian reported that doing scheduler latency measurements with perf on AMD doesn't work out as expected because the sched_clock() granularity is too coarse, i.e. done in jiffies, since sched_clock_stable is not set; if it were set, we would get to use the TSC as the sample source, which would give us much higher precision.
However, there's no reason not to set sched_clock_stable on AMD because all families from F10h and upwards do have an invariant TSC and have the CPUID flag to prove it (CPUID_8000_0007_EDX[8]). Make it so, #1. Signed-off-by: Borislav Petkov Cc: Borislav Petkov Cc: Venki Pallipadi Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Arnaldo Carvalho de Melo Cc: Robert Richter Cc: Eric Dumazet Cc: Andreas Herrmann Link: http://lkml.kernel.org/r/20120206132546.GA30854@quad [ Should any non-standard system break the TSC, we should mark them so explicitly, in their platform init handler, or in a DMI quirk. ] Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f4773f4..0a44b90 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -456,6 +457,8 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + if (!check_tsc_unstable()) + sched_clock_stable = 1; } #ifdef CONFIG_X86_64 -- cgit v0.10.2 From 73323f541fe5f55a3b8a5c3d565bfc1efd64abf6 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 2 Feb 2012 13:54:44 +0100 Subject: perf tools: fix endianness detection in perf.data The current version of perf detects whether or not the perf.data file is written in a different endianness using the attr_size field in the header of the file. This field represents sizeof(struct perf_event_attr) as known to perf record. If the sizes do not match, perf tries the byte-swapped version; if the swapped size matches, the tool assumes a different endianness. The issue with the approach is that it assumes the size of perf_event_attr always has to match between perf record and perf report. However, the kernel perf_event ABI is extensible. New fields can be added to struct perf_event_attr. Consequently, it is not possible to use attr_size to detect endianness. This patch takes another approach by using the magic number written at the beginning of the perf.data file to detect endianness. The magic number is an eight-byte signature. Its primary purpose is to identify a perf.data file. But it could also be used to encode the endianness. The patch introduces a new value for this signature. The key difference is that the signature is written differently in the file depending on the endianness. Thus, by comparing the signature from the file with the tool's own signature it is possible to detect endianness. The new signature is "PERFILE2". Backward compatibility with existing perf.data files is ensured. Tested-by: David Ahern Acked-by: David Ahern Cc: Andi Kleen Cc: Anshuman Khandual Cc: Arun Sharma Cc: David Ahern Cc: Ingo Molnar Cc: Lin Ming Cc: Peter Zijlstra Cc: Roberto Agostino Vitillo Cc: Robert Richter Cc: Vince Weaver Link: http://lkml.kernel.org/r/1328187288-24395-15-git-send-email-eranian@google.com Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ecd7f4d..6f4187d 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -63,9 +63,20 @@ char *perf_header__find_event(u64 id) return NULL; } -static const char *__perf_magic = "PERFFILE"; +/* + * magic2 = "PERFILE2" + * must be a numerical value to let the endianness + * determine the memory layout. That way we are able + * to detect endianness when reading the perf.data file + * back.
+ * + * we check for legacy (PERFFILE) format. + */ +static const char *__perf_magic1 = "PERFFILE"; +static const u64 __perf_magic2 = 0x32454c4946524550ULL; +static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; -#define PERF_MAGIC (*(u64 *)__perf_magic) +#define PERF_MAGIC __perf_magic2 struct perf_file_attr { struct perf_event_attr attr; @@ -1620,24 +1631,59 @@ out_free: return err; } +static int check_magic_endian(u64 *magic, struct perf_file_header *header, + struct perf_header *ph) +{ + int ret; + + /* check for legacy format */ + ret = memcmp(magic, __perf_magic1, sizeof(*magic)); + if (ret == 0) { + pr_debug("legacy perf.data format\n"); + if (!header) + return -1; + + if (header->attr_size != sizeof(struct perf_file_attr)) { + u64 attr_size = bswap_64(header->attr_size); + + if (attr_size != sizeof(struct perf_file_attr)) + return -1; + + ph->needs_swap = true; + } + return 0; + } + + /* check magic number with same endianness */ + if (*magic == __perf_magic2) + return 0; + + /* check magic number but opposite endianness */ + if (*magic != __perf_magic2_sw) + return -1; + + ph->needs_swap = true; + + return 0; +} + int perf_file_header__read(struct perf_file_header *header, struct perf_header *ph, int fd) { + int ret; + lseek(fd, 0, SEEK_SET); - if (readn(fd, header, sizeof(*header)) <= 0 || - memcmp(&header->magic, __perf_magic, sizeof(header->magic))) + ret = readn(fd, header, sizeof(*header)); + if (ret <= 0) return -1; - if (header->attr_size != sizeof(struct perf_file_attr)) { - u64 attr_size = bswap_64(header->attr_size); - - if (attr_size != sizeof(struct perf_file_attr)) - return -1; + if (check_magic_endian(&header->magic, header, ph) < 0) + return -1; + if (ph->needs_swap) { mem_bswap_64(header, offsetof(struct perf_file_header, - adds_features)); - ph->needs_swap = true; + adds_features)); } if (header->size != sizeof(*header)) { @@ -1873,8 +1919,13 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, struct perf_header *ph, int fd, bool repipe) { - if (readn(fd, header, sizeof(*header)) <= 0 || - memcmp(&header->magic, __perf_magic, sizeof(header->magic))) + int ret; + + ret = readn(fd, header, sizeof(*header)); + if (ret <= 0) + return -1; + + if (check_magic_endian(&header->magic, NULL, ph) < 0) return -1; if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) -- cgit v0.10.2 From d3665498955779e56453501a16f4ad084f798802 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 6 Feb 2012 15:27:52 -0700 Subject: perf record: No build id option fails A recent refactoring of perf-record introduced the following: perf record -a -B Couldn't generating buildids. Use --no-buildid to profile anyway. sleep: Terminated I believe the triple negative was meant to be only a double negative. :-) While I'm there, fixed the grammar on the error message. Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1328567272-13190-1-git-send-email-dsahern@gmail.com Signed-off-by: David Ahern Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f8d9a54..d6d1c6c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -494,9 +494,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) return err; } - if (!!rec->no_buildid + if (!rec->no_buildid && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { - pr_err("Couldn't generating buildids. 
" + pr_err("Couldn't generate buildids. " "Use --no-buildid to profile anyway.\n"); return -1; } -- cgit v0.10.2 From 9919cba7ff71147803c988521cc1ceb80e7f0f6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20Luis=20V=C3=A1zquez=20Cao?= Date: Thu, 9 Feb 2012 17:42:20 -0500 Subject: watchdog: Update documentation The soft and hard lockup detectors are now built on top of the hrtimer and perf subsystems. Update the documentation accordingly. Signed-off-by: Fernando Luis Vazquez Cao Acked-by: Randy Dunlap Signed-off-by: Don Zickus Link: http://lkml.kernel.org/r/1328827342-6253-1-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar diff --git a/Documentation/lockup-watchdogs.txt b/Documentation/lockup-watchdogs.txt new file mode 100644 index 0000000..d2a3660 --- /dev/null +++ b/Documentation/lockup-watchdogs.txt @@ -0,0 +1,63 @@ +=============================================================== +Softlockup detector and hardlockup detector (aka nmi_watchdog) +=============================================================== + +The Linux kernel can act as a watchdog to detect both soft and hard +lockups. + +A 'softlockup' is defined as a bug that causes the kernel to loop in +kernel mode for more than 20 seconds (see "Implementation" below for +details), without giving other tasks a chance to run. The current +stack trace is displayed upon detection and, by default, the system +will stay locked up. Alternatively, the kernel can be configured to +panic; a sysctl, "kernel.softlockup_panic", a kernel parameter, +"softlockup_panic" (see "Documentation/kernel-parameters.txt" for +details), and a compile option, "BOOTPARAM_HARDLOCKUP_PANIC", are +provided for this. + +A 'hardlockup' is defined as a bug that causes the CPU to loop in +kernel mode for more than 10 seconds (see "Implementation" below for +details), without letting other interrupts have a chance to run. +Similarly to the softlockup case, the current stack trace is displayed +upon detection and the system will stay locked up unless the default +behavior is changed, which can be done through a compile time knob, +"BOOTPARAM_HARDLOCKUP_PANIC", and a kernel parameter, "nmi_watchdog" +(see "Documentation/kernel-parameters.txt" for details). + +The panic option can be used in combination with panic_timeout (this +timeout is set through the confusingly named "kernel.panic" sysctl), +to cause the system to reboot automatically after a specified amount +of time. + +=== Implementation === + +The soft and hard lockup detectors are built on top of the hrtimer and +perf subsystems, respectively. A direct consequence of this is that, +in principle, they should work in any architecture where these +subsystems are present. + +A periodic hrtimer runs to generate interrupts and kick the watchdog +task. An NMI perf event is generated every "watchdog_thresh" +(compile-time initialized to 10 and configurable through sysctl of the +same name) seconds to check for hardlockups. If any CPU in the system +does not receive any hrtimer interrupt during that time the +'hardlockup detector' (the handler for the NMI perf event) will +generate a kernel warning or call panic, depending on the +configuration. + +The watchdog task is a high priority kernel thread that updates a +timestamp every time it is scheduled. 
If that timestamp is not updated +for 2*watchdog_thresh seconds (the softlockup threshold) the +'softlockup detector' (coded inside the hrtimer callback function) +will dump useful debug information to the system log, after which it +will call panic if it was instructed to do so or resume execution of +other kernel code. + +The period of the hrtimer is 2*watchdog_thresh/5, which means it has +two or three chances to generate an interrupt before the hardlockup +detector kicks in. + +As explained above, a kernel knob is provided that allows +administrators to configure the period of the hrtimer and the perf +event. The right value for a particular environment is a trade-off +between fast response to lockups and detection overhead. diff --git a/Documentation/nmi_watchdog.txt b/Documentation/nmi_watchdog.txt deleted file mode 100644 index bf9f80a..0000000 --- a/Documentation/nmi_watchdog.txt +++ /dev/null @@ -1,83 +0,0 @@ - -[NMI watchdog is available for x86 and x86-64 architectures] - -Is your system locking up unpredictably? No keyboard activity, just -a frustrating complete hard lockup? Do you want to help us debugging -such lockups? If all yes then this document is definitely for you. - -On many x86/x86-64 type hardware there is a feature that enables -us to generate 'watchdog NMI interrupts'. (NMI: Non Maskable Interrupt -which get executed even if the system is otherwise locked up hard). -This can be used to debug hard kernel lockups. By executing periodic -NMI interrupts, the kernel can monitor whether any CPU has locked up, -and print out debugging messages if so. - -In order to use the NMI watchdog, you need to have APIC support in your -kernel. For SMP kernels, APIC support gets compiled in automatically. For -UP, enable either CONFIG_X86_UP_APIC (Processor type and features -> Local -APIC support on uniprocessors) or CONFIG_X86_UP_IOAPIC (Processor type and -features -> IO-APIC support on uniprocessors) in your kernel config. -CONFIG_X86_UP_APIC is for uniprocessor machines without an IO-APIC. -CONFIG_X86_UP_IOAPIC is for uniprocessor with an IO-APIC. [Note: certain -kernel debugging options, such as Kernel Stack Meter or Kernel Tracer, -may implicitly disable the NMI watchdog.] - -For x86-64, the needed APIC is always compiled in. - -Using local APIC (nmi_watchdog=2) needs the first performance register, so -you can't use it for other purposes (such as high precision performance -profiling.) However, at least oprofile and the perfctr driver disable the -local APIC NMI watchdog automatically. - -To actually enable the NMI watchdog, use the 'nmi_watchdog=N' boot -parameter. Eg. the relevant lilo.conf entry: - - append="nmi_watchdog=1" - -For SMP machines and UP machines with an IO-APIC use nmi_watchdog=1. -For UP machines without an IO-APIC use nmi_watchdog=2, this only works -for some processor types. If in doubt, boot with nmi_watchdog=1 and -check the NMI count in /proc/interrupts; if the count is zero then -reboot with nmi_watchdog=2 and check the NMI count. If it is still -zero then log a problem, you probably have a processor that needs to be -added to the nmi code. - -A 'lockup' is the following scenario: if any CPU in the system does not -execute the period local timer interrupt for more than 5 seconds, then -the NMI handler generates an oops and kills the process. This -'controlled crash' (and the resulting kernel messages) can be used to -debug the lockup. Thus whenever the lockup happens, wait 5 seconds and -the oops will show up automatically. 
If the kernel produces no messages -then the system has crashed so hard (eg. hardware-wise) that either it -cannot even accept NMI interrupts, or the crash has made the kernel -unable to print messages. - -Be aware that when using local APIC, the frequency of NMI interrupts -it generates, depends on the system load. The local APIC NMI watchdog, -lacking a better source, uses the "cycles unhalted" event. As you may -guess it doesn't tick when the CPU is in the halted state (which happens -when the system is idle), but if your system locks up on anything but the -"hlt" processor instruction, the watchdog will trigger very soon as the -"cycles unhalted" event will happen every clock tick. If it locks up on -"hlt", then you are out of luck -- the event will not happen at all and the -watchdog won't trigger. This is a shortcoming of the local APIC watchdog --- unfortunately there is no "clock ticks" event that would work all the -time. The I/O APIC watchdog is driven externally and has no such shortcoming. -But its NMI frequency is much higher, resulting in a more significant hit -to the overall system performance. - -On x86 nmi_watchdog is disabled by default so you have to enable it with -a boot time parameter. - -It's possible to disable the NMI watchdog in run-time by writing "0" to -/proc/sys/kernel/nmi_watchdog. Writing "1" to the same file will re-enable -the NMI watchdog. Notice that you still need to use "nmi_watchdog=" parameter -at boot time. - -NOTE: In kernels prior to 2.4.2-ac18 the NMI-oopser is enabled unconditionally -on x86 SMP boxes. - -[ feel free to send bug reports, suggestions and patches to - Ingo Molnar or the Linux SMP mailing - list at ] - -- cgit v0.10.2 From 5f329089431aac08bf759fa2bcb34077a6aa1f4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20Luis=20V=C3=A1zquez=20Cao?= Date: Thu, 9 Feb 2012 17:42:21 -0500 Subject: watchdog: Update Kconfig entries The soft and hard lockup thresholds have changed so the corresponding Kconfig entries need to be updated accordingly. Add a reference to watchdog_thresh while at it. Signed-off-by: Fernando Luis Vazquez Cao Signed-off-by: Don Zickus Link: http://lkml.kernel.org/r/1328827342-6253-2-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8745ac7..9739c0b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -166,18 +166,21 @@ config LOCKUP_DETECTOR hard and soft lockups. Softlockups are bugs that cause the kernel to loop in kernel - mode for more than 60 seconds, without giving other tasks a + mode for more than 20 seconds, without giving other tasks a chance to run. The current stack trace is displayed upon detection and the system will stay locked up. Hardlockups are bugs that cause the CPU to loop in kernel mode - for more than 60 seconds, without letting other interrupts have a + for more than 10 seconds, without letting other interrupts have a chance to run. The current stack trace is displayed upon detection and the system will stay locked up. The overhead should be minimal. A periodic hrtimer runs to - generate interrupts and kick the watchdog task every 10-12 seconds. - An NMI is generated every 60 seconds or so to check for hardlockups. + generate interrupts and kick the watchdog task every 4 seconds. + An NMI is generated every 10 seconds or so to check for hardlockups. + + The frequency of hrtimer and NMI events and the soft and hard lockup + thresholds can be controlled through the sysctl watchdog_thresh. 
config HARDLOCKUP_DETECTOR def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \ @@ -189,7 +192,8 @@ config BOOTPARAM_HARDLOCKUP_PANIC help Say Y here to enable the kernel to panic on "hard lockups", which are bugs that cause the kernel to loop in kernel - mode with interrupts disabled for more than 60 seconds. + mode with interrupts disabled for more than 10 seconds (configurable + using the watchdog_thresh sysctl). Say N if unsure. @@ -206,8 +210,8 @@ config BOOTPARAM_SOFTLOCKUP_PANIC help Say Y here to enable the kernel to panic on "soft lockups", which are bugs that cause the kernel to loop in kernel - mode for more than 60 seconds, without giving other tasks a - chance to run. + mode for more than 20 seconds (configurable using the watchdog_thresh + sysctl), without giving other tasks a chance to run. The panic can be used in combination with panic_timeout, to cause the system to reboot automatically after a -- cgit v0.10.2 From 86f5e6a7b192721995ece919985ac75222402351 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20Luis=20V=C3=A1zquez=20Cao?= Date: Thu, 9 Feb 2012 17:42:22 -0500 Subject: watchdog: Fix code/comments mismatches Reflect the change in the soft and hard lockup thresholds and their relation to the frequency of the hrtimer and NMI events in the code comments. While at it, remove references to files that do not exist anymore. Signed-off-by: Fernando Luis Vazquez Cao Signed-off-by: Don Zickus Link: http://lkml.kernel.org/r/1328827342-6253-3-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d117262..14bc092 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -3,12 +3,9 @@ * * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. * - * this code detects hard lockups: incidents in where on a CPU - * the kernel does not respond to anything except NMI. - * - * Note: Most of this code is borrowed heavily from softlockup.c, - * so thanks to Ingo for the initial implementation. - * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks + * Note: Most of this code is borrowed heavily from the original softlockup + * detector, so thanks to Ingo for the initial implementation. + * Some chunks also taken from the old x86-specific nmi watchdog code, thanks * to those contributors as well. */ @@ -117,9 +114,10 @@ static unsigned long get_sample_period(void) { /* * convert watchdog_thresh from seconds to ns - * the divide by 5 is to give hrtimer 5 chances to - * increment before the hardlockup detector generates - * a warning + * the divide by 5 is to give hrtimer several chances (two + * or three with the current relation between the soft + * and hard thresholds) to increment before the + * hardlockup detector generates a warning */ return get_softlockup_thresh() * (NSEC_PER_SEC / 5); } @@ -336,9 +334,11 @@ static int watchdog(void *unused) set_current_state(TASK_INTERRUPTIBLE); /* - * Run briefly once per second to reset the softlockup timestamp. - * If this gets delayed for more than 60 seconds then the - * debug-printout triggers in watchdog_timer_fn(). + * Run briefly (kicked by the hrtimer callback function) once every + * get_sample_period() seconds (4 seconds by default) to reset the + * softlockup timestamp. If this gets delayed for more than + * 2*watchdog_thresh seconds then the debug-printout triggers in + * watchdog_timer_fn(). 
*/ while (!kthread_should_stop()) { __touch_watchdog(); -- cgit v0.10.2 From f8d98f1095210da708a59f3a0b6fd267ad8f3f03 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 10 Feb 2012 14:33:40 +0900 Subject: x86: Fix to decode grouped AVX with VEX pp bits Fix the decoding of grouped AVX instructions with VEX pp bits, which should be handled the same as last-prefixes. This fixes the below warnings in posttest with CONFIG_CRYPTO_SHA1_SSSE3=y. Warning: arch/x86/tools/test_get_len found difference at :ffffffff810d5fc0 Warning: ffffffff810d6069: c5 f9 73 de 04 vpsrldq $0x4,%xmm6,%xmm0 Warning: objdump says 5 bytes, but insn_get_length() says 4 ... With this change, test_get_len can decode it correctly. $ arch/x86/tools/test_get_len -v -y ffffffff810d6069: c5 f9 73 de 04 vpsrldq $0x4,%xmm6,%xmm0 Succeed: decoded and checked 1 instructions Reported-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20120210053340.30429.73410.stgit@localhost.localdomain Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 205b063..74a2e31 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -97,11 +97,12 @@ /* Attribute search APIs */ extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); +extern int inat_get_last_prefix_id(insn_byte_t last_pfx); extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, - insn_byte_t last_pfx, + int lpfx_id, insn_attr_t esc_attr); extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, - insn_byte_t last_pfx, + int lpfx_id, insn_attr_t esc_attr); extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 74df3f1..48eb30a 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -96,12 +96,6 @@ struct insn { #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ -/* The last prefix is needed for two-byte and three-byte opcodes */ -static inline insn_byte_t insn_last_prefix(struct insn *insn) -{ - return insn->prefixes.bytes[3]; -} - extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); extern void insn_get_prefixes(struct insn *insn); extern void insn_get_opcode(struct insn *insn); @@ -160,6 +154,18 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn) return X86_VEX_P(insn->vex_prefix.bytes[2]); } +/* Get the last prefix id from last prefix or VEX prefix */ +static inline int insn_last_prefix_id(struct insn *insn) +{ + if (insn_is_avx(insn)) + return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */ + + if (insn->prefixes.bytes[3]) + return inat_get_last_prefix_id(insn->prefixes.bytes[3]); + + return 0; +} + /* Offset of each field from kaddr */ static inline int insn_offset_rex_prefix(struct insn *insn) { diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 88ad5fb..c1f01a8 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c @@ -29,46 +29,46 @@ insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode) return inat_primary_table[opcode]; } -insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx, +int inat_get_last_prefix_id(insn_byte_t last_pfx) +{ + insn_attr_t lpfx_attr; + + lpfx_attr = inat_get_opcode_attribute(last_pfx); + return inat_last_prefix_id(lpfx_attr); +} + +insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id, insn_attr_t esc_attr) { const insn_attr_t
*table; - insn_attr_t lpfx_attr; - int n, m = 0; + int n; n = inat_escape_id(esc_attr); - if (last_pfx) { - lpfx_attr = inat_get_opcode_attribute(last_pfx); - m = inat_last_prefix_id(lpfx_attr); - } + table = inat_escape_tables[n][0]; if (!table) return 0; - if (inat_has_variant(table[opcode]) && m) { - table = inat_escape_tables[n][m]; + if (inat_has_variant(table[opcode]) && lpfx_id) { + table = inat_escape_tables[n][lpfx_id]; if (!table) return 0; } return table[opcode]; } -insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, +insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id, insn_attr_t grp_attr) { const insn_attr_t *table; - insn_attr_t lpfx_attr; - int n, m = 0; + int n; n = inat_group_id(grp_attr); - if (last_pfx) { - lpfx_attr = inat_get_opcode_attribute(last_pfx); - m = inat_last_prefix_id(lpfx_attr); - } + table = inat_group_tables[n][0]; if (!table) return inat_group_common_attribute(grp_attr); - if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) { - table = inat_group_tables[n][m]; + if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) { + table = inat_group_tables[n][lpfx_id]; if (!table) return inat_group_common_attribute(grp_attr); } diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 5a1f9f3..25feb1a 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -185,7 +185,8 @@ err_out: void insn_get_opcode(struct insn *insn) { struct insn_field *opcode = &insn->opcode; - insn_byte_t op, pfx; + insn_byte_t op; + int pfx_id; if (opcode->got) return; if (!insn->prefixes.got) @@ -212,8 +213,8 @@ void insn_get_opcode(struct insn *insn) /* Get escaped opcode */ op = get_next(insn_byte_t, insn); opcode->bytes[opcode->nbytes++] = op; - pfx = insn_last_prefix(insn); - insn->attr = inat_get_escape_attribute(op, pfx, insn->attr); + pfx_id = insn_last_prefix_id(insn); + insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr); } if (inat_must_vex(insn->attr)) insn->attr = 0; /* This instruction is bad */ @@ -235,7 +236,7 @@ err_out: void insn_get_modrm(struct insn *insn) { struct insn_field *modrm = &insn->modrm; - insn_byte_t pfx, mod; + insn_byte_t pfx_id, mod; if (modrm->got) return; if (!insn->opcode.got) @@ -246,8 +247,8 @@ void insn_get_modrm(struct insn *insn) modrm->value = mod; modrm->nbytes = 1; if (inat_is_group(insn->attr)) { - pfx = insn_last_prefix(insn); - insn->attr = inat_get_group_attribute(mod, pfx, + pfx_id = insn_last_prefix_id(insn); + insn->attr = inat_get_group_attribute(mod, pfx_id, insn->attr); if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) insn->attr = 0; /* This is bad */ -- cgit v0.10.2 From 2fbb90db1b8fcc78f43830f1a009f3af243c5f42 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 7 Feb 2012 09:32:43 -0500 Subject: tracing/rcu: Add trace_##name##__rcuidle() static tracepoint for inside rcu_idle_exit() sections Added is a new static inline function that lets *any* tracepoint be used inside a rcu_idle_exit() section. And this also solves the problem where the same tracepoint may be used inside a rcu_idle_exit() section as well as outside of one. I added a new tracepoint function with a "_rcuidle" extension. All tracepoints can be used with either the normal "trace_foobar()" function, or the "trace_foobar_rcuidle()" function when inside a rcu_idle_exit() section. All tracepoints defined by TRACE_EVENT() or any of the derivatives will have a "_rcuidle()" function also defined. 
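As a usage sketch (the event name and the idle routine are hypothetical; the real conversions follow in the x86 and cpuidle patches below), idle-path code switches to the generated variant:

static void my_arch_idle(void)
{
	rcu_idle_enter();

	/*
	 * RCU is not watching the CPU at this point, so a plain
	 * trace_my_idle_event() would be unsafe; the generated
	 * _rcuidle variant briefly leaves the RCU-idle state
	 * (rcu_idle_exit()/rcu_idle_enter()) around the callbacks.
	 */
	trace_my_idle_event_rcuidle(smp_processor_id());

	/* ... halt the CPU ... */

	rcu_idle_exit();
}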
When a tracepoint is used within an rcu_idle_exit() section, the "_rcuidle()" version must be used. This denotes that the tracepoint is within rcu_idle_exit() and it allows the rcu read locks within the tracepoint to still be valid, as this version takes us out of rcu_idle_exit(). Another nice aspect of this patch is that "static inline"s are not compiled into text when not used. So only the tracepoints that actually use the _rcuidle() version will have them defined in the actual text that is booted. Link: http://lkml.kernel.org/r/1328563113.2200.39.camel@gandalf.stny.rr.com Acked-by: Paul E. McKenney Reviewed-by: Josh Triplett Signed-off-by: Steven Rostedt diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index df0a779..fc36da9 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -114,7 +114,7 @@ static inline void tracepoint_synchronize_unregister(void) * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". */ -#define __DO_TRACE(tp, proto, args, cond) \ +#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \ do { \ struct tracepoint_func *it_func_ptr; \ void *it_func; \ @@ -122,6 +122,7 @@ static inline void tracepoint_synchronize_unregister(void) \ if (!(cond)) \ return; \ + prercu; \ rcu_read_lock_sched_notrace(); \ it_func_ptr = rcu_dereference_sched((tp)->funcs); \ if (it_func_ptr) { \ @@ -132,6 +133,7 @@ static inline void tracepoint_synchronize_unregister(void) } while ((++it_func_ptr)->func); \ } \ rcu_read_unlock_sched_notrace(); \ + postrcu; \ } while (0) /* @@ -139,7 +141,7 @@ static inline void tracepoint_synchronize_unregister(void) * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ @@ -147,7 +149,17 @@ static inline void tracepoint_synchronize_unregister(void) __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ - TP_CONDITION(cond)); \ + TP_CONDITION(cond),,); \ } \ + static inline void trace_##name##_rcuidle(proto) \ + { \ + if (static_branch(&__tracepoint_##name.key)) \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond), \ + rcu_idle_exit(), \ + rcu_idle_enter()); \ } \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ @@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void) EXPORT_SYMBOL(__tracepoint_##name) #else /* !CONFIG_TRACEPOINTS */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ + static inline void trace_##name##_rcuidle(proto) \ + { } \ static inline int \ register_trace_##name(void (*probe)(data_proto), \ void *data) \ -- cgit v0.10.2 From 484546509ce5d49d43ec0a6eb2141c6bf3362bfc Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 7 Feb 2012 09:40:30 -0500 Subject: x86/tracing: Denote the power and cpuidle tracepoints as _rcuidle() The power and cpuidle tracepoints are called within an rcu_idle_exit() section, and must be denoted with the _rcuidle() version of the tracepoint. Acked-by: Paul E.
McKenney Reviewed-by: Josh Triplett Signed-off-by: Steven Rostedt diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 15763af..44eefde 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -377,8 +377,8 @@ static inline int hlt_use_halt(void) void default_idle(void) { if (hlt_use_halt()) { - trace_power_start(POWER_CSTATE, 1, smp_processor_id()); - trace_cpu_idle(1, smp_processor_id()); + trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); + trace_cpu_idle_rcuidle(1, smp_processor_id()); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we @@ -391,8 +391,8 @@ void default_idle(void) else local_irq_enable(); current_thread_info()->status |= TS_POLLING; - trace_power_end(smp_processor_id()); - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + trace_power_end_rcuidle(smp_processor_id()); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } else { local_irq_enable(); /* loop is done by the caller */ @@ -450,8 +450,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); static void mwait_idle(void) { if (!need_resched()) { - trace_power_start(POWER_CSTATE, 1, smp_processor_id()); - trace_cpu_idle(1, smp_processor_id()); + trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); + trace_cpu_idle_rcuidle(1, smp_processor_id()); if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); @@ -461,8 +461,8 @@ static void mwait_idle(void) __sti_mwait(0, 0); else local_irq_enable(); - trace_power_end(smp_processor_id()); - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + trace_power_end_rcuidle(smp_processor_id()); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } else local_irq_enable(); } @@ -474,13 +474,13 @@ static void mwait_idle(void) */ static void poll_idle(void) { - trace_power_start(POWER_CSTATE, 0, smp_processor_id()); - trace_cpu_idle(0, smp_processor_id()); + trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id()); + trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); while (!need_resched()) cpu_relax(); - trace_power_end(smp_processor_id()); - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + trace_power_end_rcuidle(smp_processor_id()); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } /* diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 1bcc2a8..14b3894 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -151,6 +151,8 @@ enum { events get removed */ static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {}; static inline void trace_power_end(u64 cpuid) {}; +static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {}; +static inline void trace_power_end_rcuidle(u64 cpuid) {}; static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {}; #endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */ -- cgit v0.10.2 From 76027ea863fc02698da536b4970784eed3caa635 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 7 Feb 2012 09:46:01 -0500 Subject: cpuidle/tracing: Denote the tracepoints as being in rcu_idle_exit() section As the tracepoints in the cpuidle code are called when rcu_idle_exit() is in effect, the _rcuidle() version must be used, otherwise the rcu_read_lock()s that protect the tracepoint will not be honored. Cc: Len Brown Reviewed-by: Josh Triplett Reviewed-by: Paul E. 
McKenney Signed-off-by: Steven Rostedt diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 59f4261..6588f43 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -94,13 +94,13 @@ int cpuidle_idle_call(void) target_state = &drv->states[next_state]; - trace_power_start(POWER_CSTATE, next_state, dev->cpu); - trace_cpu_idle(next_state, dev->cpu); + trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu); + trace_cpu_idle_rcuidle(next_state, dev->cpu); entered_state = target_state->enter(dev, drv, next_state); - trace_power_end(dev->cpu); - trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); + trace_power_end_rcuidle(dev->cpu); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); if (entered_state >= 0) { /* Update cpuidle counters */ -- cgit v0.10.2 From 1e42e83fde5537266c1d1e7fd8c010b3028d50fc Mon Sep 17 00:00:00 2001 From: Geunsik Lim Date: Wed, 8 Feb 2012 19:05:36 +0900 Subject: ftrace: sched_switch plugin is deprecated Actually, sched_switch function tracer is merged into wakeup/wakeup_rt Update 'mini-HOWTO' for ftrace(Kernel function tracer). If we want to trace "sched:sched_switch" to trace sched_switch func, We may utilize event option.(e.g: trace-cmd list -e | grep sched) This patch is based on Linux-3.3.rc2-SMP-PREEMPT Link: http://lkml.kernel.org/r/1328695537-15081-1-git-send-email-geunsik.lim@gmail.com Cc: Randy Dunlap Signed-off-by: Geunsik Lim Signed-off-by: Steven Rostedt diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a3f1bc5..10d5503 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2764,12 +2764,12 @@ static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# mount -t debugfs nodev /sys/kernel/debug\n\n" "# cat /sys/kernel/debug/tracing/available_tracers\n" - "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" + "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n" "# cat /sys/kernel/debug/tracing/current_tracer\n" "nop\n" - "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n" + "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n" "# cat /sys/kernel/debug/tracing/current_tracer\n" - "sched_switch\n" + "wakeup\n" "# cat /sys/kernel/debug/tracing/trace_options\n" "noprint-parent nosym-offset nosym-addr noverbose\n" "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" -- cgit v0.10.2 From cdfb0d30e943f36c8a074a26c3d168a05bdbb372 Mon Sep 17 00:00:00 2001 From: Geunsik Lim Date: Wed, 8 Feb 2012 19:05:37 +0900 Subject: ftrace: Append wakeup_rt description of ftrace doc Append and update the description about wakeup/wakeup_rt usage. Link: http://lkml.kernel.org/r/1328695537-15081-2-git-send-email-geunsik.lim@gmail.com Cc: Randy Dunlap Signed-off-by: Geunsik Lim Signed-off-by: Steven Rostedt diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 1ebc24c..6f51fed 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -226,6 +226,13 @@ Here is the list of current tracers that may be configured. Traces and records the max latency that it takes for the highest priority task to get scheduled after it has been woken up. + Traces all tasks as an average developer would expect. + + "wakeup_rt" + + Traces and records the max latency that it takes for just + RT tasks (as the current "wakeup" does). This is useful + for those interested in wake up timings of RT tasks. 
"hw-branch-tracer" -- cgit v0.10.2 From 95100358491abaa2e9a5483811370059bbca4645 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 24 Nov 2011 20:03:08 +0100 Subject: printk/tracing: Add console output tracing Add a printk.console trace point to record any printk messages into the trace, regardless of the current console loglevel. This can help correlate (existing) printk debugging with other tracing. Link: http://lkml.kernel.org/r/1322161388.5366.54.camel@jlt3.sipsolutions.net Acked-by: Frederic Weisbecker Cc: Christoph Hellwig Cc: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Thomas Gleixner Signed-off-by: Johannes Berg Signed-off-by: Steven Rostedt diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h new file mode 100644 index 0000000..94ec79c --- /dev/null +++ b/include/trace/events/printk.h @@ -0,0 +1,41 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM printk + +#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PRINTK_H + +#include + +TRACE_EVENT_CONDITION(console, + TP_PROTO(const char *log_buf, unsigned start, unsigned end, + unsigned log_buf_len), + + TP_ARGS(log_buf, start, end, log_buf_len), + + TP_CONDITION(start != end), + + TP_STRUCT__entry( + __dynamic_array(char, msg, end - start + 1) + ), + + TP_fast_assign( + if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) { + memcpy(__get_dynamic_array(msg), + log_buf + (start & (log_buf_len - 1)), + log_buf_len - (start & (log_buf_len - 1))); + memcpy((char *)__get_dynamic_array(msg) + + log_buf_len - (start & (log_buf_len - 1)), + log_buf, end & (log_buf_len - 1)); + } else + memcpy(__get_dynamic_array(msg), + log_buf + (start & (log_buf_len - 1)), + end - start); + ((char *)__get_dynamic_array(msg))[end - start] = 0; + ), + + TP_printk("%s", __get_str(msg)) +); +#endif /* _TRACE_PRINTK_H */ + +/* This part must be outside protection */ +#include diff --git a/kernel/printk.c b/kernel/printk.c index 13c0a11..cb8a6bd 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -44,6 +44,9 @@ #include +#define CREATE_TRACE_POINTS +#include + /* * Architectures can override it: */ @@ -542,6 +545,8 @@ MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to" static void _call_console_drivers(unsigned start, unsigned end, int msg_log_level) { + trace_console(&LOG_BUF(0), start, end, log_buf_len); + if ((msg_log_level < console_loglevel || ignore_loglevel) && console_drivers && start != end) { if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) { -- cgit v0.10.2 From 47b0edcb599ea6eb9ef16d3a08932a0e01485293 Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Tue, 29 Nov 2011 22:08:00 +0100 Subject: tracing/trivial: Use kcalloc instead of kzalloc to allocate array The advantage of kcalloc is, that will prevent integer overflows which could result from the multiplication of number of elements and size and it is also a bit nicer to read. 
The semantic patch that makes this change is available in https://lkml.org/lkml/2011/11/25/107 Link: http://lkml.kernel.org/r/1322600880.1534.347.camel@localhost.localdomain Signed-off-by: Thomas Meyer Signed-off-by: Steven Rostedt diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e2e0597..d1499e9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1129,7 +1129,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits) return NULL; size = 1 << size_bits; - hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); + hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); if (!hash->buckets) { kfree(hash); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 24aee71..76afaee 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -685,7 +685,7 @@ find_event_field(struct ftrace_event_call *call, char *name) static int __alloc_pred_stack(struct pred_stack *stack, int n_preds) { - stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL); + stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL); if (!stack->preds) return -ENOMEM; stack->index = n_preds; @@ -826,8 +826,7 @@ static int __alloc_preds(struct event_filter *filter, int n_preds) if (filter->preds) __free_preds(filter); - filter->preds = - kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL); + filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL); if (!filter->preds) return -ENOMEM; @@ -1486,7 +1485,7 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root) children = count_leafs(preds, &preds[root->left]); children += count_leafs(preds, &preds[root->right]); - root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL); + root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL); if (!root->ops) return -ENOMEM; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index cb65454..4350015 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -468,8 +468,8 @@ int __init init_ftrace_syscalls(void) unsigned long addr; int i; - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * - NR_syscalls, GFP_KERNEL); + syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata), + GFP_KERNEL); if (!syscalls_metadata) { WARN_ON(1); return -ENOMEM; -- cgit v0.10.2 From 6a5c13aff49ac9b3fea38d5f84b436718cb2780d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Sun, 12 Feb 2012 19:45:24 +0900 Subject: perf tools: Fix build dependency of perf python extension The perf python extension (perf.so) file lacks its dependencies in the Makefile, so that it cannot be refreshed if one of the source files it depends on is changed. Fix it by putting them in a separate file and processing it in both the Makefile and setup.py.
Reported-by: Arnaldo Carvalho de Melo Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1329043524-12470-1-git-send-email-namhyung@gmail.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 64df5de..8359fa1 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -183,7 +183,10 @@ SCRIPT_SH += perf-archive.sh grep-libs = $(filter -l%,$(1)) strip-libs = $(filter-out -l%,$(1)) -$(OUTPUT)python/perf.so: $(PYRF_OBJS) +PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) +PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py + +$(OUTPUT)python/perf.so: $(PYRF_OBJS) $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ --quiet build_ext; \ mkdir -p $(OUTPUT)python && \ diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources new file mode 100644 index 0000000..ff606f4 --- /dev/null +++ b/tools/perf/util/python-ext-sources @@ -0,0 +1,17 @@ +# +# List of files needed by perf python extention +# +# Each source file must be placed on its own line so that it can be +# processed by Makefile and util/setup.py accordingly. +# + +util/python.c +util/ctype.c +util/evlist.c +util/evsel.c +util/cpumap.c +util/thread_map.c +util/util.c +util/xyarray.c +util/cgroup.c +util/debugfs.c diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 36d4c56..d0f9f29 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -24,11 +24,11 @@ cflags += getenv('CFLAGS', '').split() build_lib = getenv('PYTHON_EXTBUILD_LIB') build_tmp = getenv('PYTHON_EXTBUILD_TMP') +ext_sources = [f.strip() for f in file('util/python-ext-sources') + if len(f.strip()) > 0 and f[0] != '#'] + perf = Extension('perf', - sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', - 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', - 'util/util.c', 'util/xyarray.c', 'util/cgroup.c', - 'util/debugfs.c'], + sources = ext_sources, include_dirs = ['util/include'], extra_compile_args = cflags, ) -- cgit v0.10.2 From eca1c3e3f937307331fd1fd5ee5205e57f2131ca Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 13 Feb 2012 07:57:37 -0700 Subject: perf tools: Fix out of tree compiles For latest tip/perf/core tree Compiles are failing on: GEN common-cmds.h make: *** No rule to make target `../../arch/x86/lib/memset_64.S', needed by `builtin-annotate.o'. Stop. Resolve by adding memset.* to the tar file. Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1329145057-26302-1-git-send-email-dsahern@gmail.com Signed-off-by: David Ahern Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 1078c5f..5476bc0 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -9,6 +9,7 @@ lib/rbtree.c include/linux/swab.h arch/*/include/asm/unistd*.h arch/*/lib/memcpy*.S +arch/*/lib/memset*.S include/linux/poison.h include/linux/magic.h include/linux/hw_breakpoint.h -- cgit v0.10.2 From b52956c961be3a04182ae7b776623531601e0fb7 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 8 Feb 2012 09:32:52 -0700 Subject: perf tools: Allow multiple threads or processes in record, stat, top Allow a user to collect events for multiple threads or processes using a comma separated list. 
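Internally the comma separated list has to be split into individual ids before thread maps can be built; a self-contained user-space sketch of that parsing follows (parse_pid_list is a hypothetical helper, the tool itself uses strlist, as the thread_map changes below show):

    #include <stdlib.h>
    #include <sys/types.h>

    /* parse "21483,21485" into pids[]; returns the count, or -1 on bad input */
    static int parse_pid_list(const char *str, pid_t *pids, int max)
    {
            int n = 0;
            char *end;

            while (*str && n < max) {
                    long v = strtol(str, &end, 10);

                    if (end == str || (*end != '\0' && *end != ','))
                            return -1;
                    pids[n++] = (pid_t)v;
                    str = (*end == ',') ? end + 1 : end;
            }
            return n;
    }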
e.g., collect data on a VM and its vhost thread: perf top -p 21483,21485 perf stat -p 21483,21485 -ddd perf record -p 21483,21485 or monitoring vcpu threads perf top -t 21488,21489 perf stat -t 21488,21489 -ddd perf record -t 21488,21489 Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1328718772-16688-1-git-send-email-dsahern@gmail.com Signed-off-by: David Ahern Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index ff9a66e..a5766b4 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -52,11 +52,11 @@ OPTIONS -p:: --pid=:: - Record events on existing process ID. + Record events on existing process ID (comma separated list). -t:: --tid=:: - Record events on existing thread ID. + Record events on existing thread ID (comma separated list). -u:: --uid=:: diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 8966b9a..2fa173b 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -35,11 +35,11 @@ OPTIONS child tasks do not inherit counters -p:: --pid=:: - stat events on existing process id + stat events on existing process id (comma separated list) -t:: --tid=:: - stat events on existing thread id + stat events on existing thread id (comma separated list) -a:: diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index ab1454e..4a5680c 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -72,11 +72,11 @@ Default is to monitor all CPUS. -p :: --pid=:: - Profile events on existing Process ID. + Profile events on existing Process ID (comma separated list). -t :: --tid=:: - Profile events on existing thread ID. + Profile events on existing thread ID (comma separated list). 
-u:: --uid=:: diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index d6d1c6c..08ed24b 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -645,8 +645,6 @@ static const char * const record_usage[] = { */ static struct perf_record record = { .opts = { - .target_pid = -1, - .target_tid = -1, .mmap_pages = UINT_MAX, .user_freq = UINT_MAX, .user_interval = ULLONG_MAX, @@ -670,9 +668,9 @@ const struct option record_options[] = { parse_events_option), OPT_CALLBACK(0, "filter", &record.evlist, "filter", "event filter", parse_filter), - OPT_INTEGER('p', "pid", &record.opts.target_pid, + OPT_STRING('p', "pid", &record.opts.target_pid, "pid", "record events on existing process id"), - OPT_INTEGER('t', "tid", &record.opts.target_tid, + OPT_STRING('t', "tid", &record.opts.target_tid, "tid", "record events on existing thread id"), OPT_INTEGER('r', "realtime", &record.realtime_prio, "collect data with this RT SCHED_FIFO priority"), @@ -739,7 +737,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 && + if (!argc && !rec->opts.target_pid && !rec->opts.target_tid && !rec->opts.system_wide && !rec->opts.cpu_list && !rec->uid_str) usage_with_options(record_usage, record_options); @@ -785,7 +783,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) if (rec->uid_str != NULL && rec->opts.uid == UINT_MAX - 1) goto out_free_fd; - if (rec->opts.target_pid != -1) + if (rec->opts.target_pid) rec->opts.target_tid = rec->opts.target_pid; if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid, diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index d14b37a..ea40e4e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -182,8 +182,8 @@ static int run_count = 1; static bool no_inherit = false; static bool scale = true; static bool no_aggr = false; -static pid_t target_pid = -1; -static pid_t target_tid = -1; +static const char *target_pid; +static const char *target_tid; static pid_t child_pid = -1; static bool null_run = false; static int detailed_run = 0; @@ -296,7 +296,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel, if (system_wide) return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group, group_fd); - if (target_pid == -1 && target_tid == -1) { + if (!target_pid && !target_tid) { attr->disabled = 1; attr->enable_on_exec = 1; } @@ -446,7 +446,7 @@ static int run_perf_stat(int argc __used, const char **argv) exit(-1); } - if (target_tid == -1 && target_pid == -1 && !system_wide) + if (!target_tid && !target_pid && !system_wide) evsel_list->threads->map[0] = child_pid; /* @@ -968,14 +968,14 @@ static void print_stat(int argc, const char **argv) if (!csv_output) { fprintf(output, "\n"); fprintf(output, " Performance counter stats for "); - if(target_pid == -1 && target_tid == -1) { + if (!target_pid && !target_tid) { fprintf(output, "\'%s", argv[0]); for (i = 1; i < argc; i++) fprintf(output, " %s", argv[i]); - } else if (target_pid != -1) - fprintf(output, "process id \'%d", target_pid); + } else if (target_pid) + fprintf(output, "process id \'%s", target_pid); else - fprintf(output, "thread id \'%d", target_tid); + fprintf(output, "thread id \'%s", target_tid); fprintf(output, "\'"); if (run_count > 1) @@ -1049,10 +1049,10 @@ static const struct option options[] = { "event filter", parse_filter), 
OPT_BOOLEAN('i', "no-inherit", &no_inherit, "child tasks do not inherit counters"), - OPT_INTEGER('p', "pid", &target_pid, - "stat events on existing process id"), - OPT_INTEGER('t', "tid", &target_tid, - "stat events on existing thread id"), + OPT_STRING('p', "pid", &target_pid, "pid", + "stat events on existing process id"), + OPT_STRING('t', "tid", &target_tid, "tid", + "stat events on existing thread id"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), OPT_BOOLEAN('g', "group", &group, @@ -1190,7 +1190,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) } else if (big_num_opt == 0) /* User passed --no-big-num */ big_num = false; - if (!argc && target_pid == -1 && target_tid == -1) + if (!argc && !target_pid && !target_tid) usage_with_options(stat_usage, options); if (run_count <= 0) usage_with_options(stat_usage, options); @@ -1206,10 +1206,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) if (add_default_attributes()) goto out; - if (target_pid != -1) + if (target_pid) target_tid = target_pid; - evsel_list->threads = thread_map__new(target_pid, target_tid, UINT_MAX); + evsel_list->threads = thread_map__new_str(target_pid, + target_tid, UINT_MAX); if (evsel_list->threads == NULL) { pr_err("Problems finding threads of monitor\n"); usage_with_options(stat_usage, options); diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 70c4eb2..0f15195 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -1010,8 +1010,6 @@ realloc: static int test__PERF_RECORD(void) { struct perf_record_opts opts = { - .target_pid = -1, - .target_tid = -1, .no_delay = true, .freq = 10, .mmap_pages = 256, diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index d869b21..94d55cb 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -965,7 +965,7 @@ static int __cmd_top(struct perf_top *top) if (ret) goto out_delete; - if (top->target_tid != -1 || top->uid != UINT_MAX) + if (top->target_tid || top->uid != UINT_MAX) perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, perf_event__process, &top->session->host_machine); @@ -1103,8 +1103,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) struct perf_top top = { .count_filter = 5, .delay_secs = 2, - .target_pid = -1, - .target_tid = -1, .uid = UINT_MAX, .freq = 1000, /* 1 KHz */ .sample_id_all_avail = true, @@ -1118,9 +1116,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) parse_events_option), OPT_INTEGER('c', "count", &top.default_interval, "event period to sample"), - OPT_INTEGER('p', "pid", &top.target_pid, + OPT_STRING('p', "pid", &top.target_pid, "pid", "profile events on existing process id"), - OPT_INTEGER('t', "tid", &top.target_tid, + OPT_STRING('t', "tid", &top.target_tid, "tid", "profile events on existing thread id"), OPT_BOOLEAN('a', "all-cpus", &top.system_wide, "system-wide collection from all CPUs"), @@ -1210,13 +1208,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) goto out_delete_evlist; /* CPU and PID are mutually exclusive */ - if (top.target_tid > 0 && top.cpu_list) { + if (top.target_tid && top.cpu_list) { printf("WARNING: PID switch overriding CPU\n"); sleep(1); top.cpu_list = NULL; } - if (top.target_pid != -1) + if (top.target_pid) top.target_tid = top.target_pid; if (perf_evlist__create_maps(top.evlist, top.target_pid, diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 92af168..deb17db 100644 --- 
a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -186,8 +186,8 @@ extern const char perf_version_string[]; void pthread__unblock_sigwinch(void); struct perf_record_opts { - pid_t target_pid; - pid_t target_tid; + const char *target_pid; + const char *target_tid; uid_t uid; bool call_graph; bool group; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index a57a8cf..5c61dc5 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -593,15 +593,15 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, return perf_evlist__mmap_per_cpu(evlist, prot, mask); } -int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, - pid_t target_tid, uid_t uid, const char *cpu_list) +int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid, + const char *target_tid, uid_t uid, const char *cpu_list) { - evlist->threads = thread_map__new(target_pid, target_tid, uid); + evlist->threads = thread_map__new_str(target_pid, target_tid, uid); if (evlist->threads == NULL) return -1; - if (uid != UINT_MAX || (cpu_list == NULL && target_tid != -1)) + if (uid != UINT_MAX || (cpu_list == NULL && target_tid)) evlist->cpus = cpu_map__dummy_new(); else evlist->cpus = cpu_map__new(cpu_list); @@ -820,7 +820,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, exit(-1); } - if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1) + if (!opts->system_wide && !opts->target_tid && !opts->target_pid) evlist->threads->map[0] = evlist->workload.pid; close(child_ready_pipe[1]); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 1b4282b..21f1c9e 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -106,8 +106,8 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, evlist->threads = threads; } -int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, - pid_t tid, uid_t uid, const char *cpu_list); +int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid, + const char *tid, uid_t uid, const char *cpu_list); void perf_evlist__delete_maps(struct perf_evlist *evlist); int perf_evlist__set_filters(struct perf_evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 9a11f9e..f910f50 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -130,7 +130,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) attr->mmap = track; attr->comm = track; - if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) { + if (!opts->target_pid && !opts->target_tid && !opts->system_wide) { attr->disabled = 1; attr->enable_on_exec = 1; } diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index ff606f4..2884e67 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -15,3 +15,5 @@ util/util.c util/xyarray.c util/cgroup.c util/debugfs.c +util/strlist.c +../../lib/rbtree.c diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index 3d4b6c5..e15983c 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -6,6 +6,8 @@ #include #include #include +#include "strlist.h" +#include #include "thread_map.h" /* Skip "." and ".." 
directories */ @@ -152,6 +154,132 @@ struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid) return thread_map__new_by_tid(tid); } +static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) +{ + struct thread_map *threads = NULL, *nt; + char name[256]; + int items, total_tasks = 0; + struct dirent **namelist = NULL; + int i, j = 0; + pid_t pid, prev_pid = INT_MAX; + char *end_ptr; + struct str_node *pos; + struct strlist *slist = strlist__new(false, pid_str); + + if (!slist) + return NULL; + + strlist__for_each(pos, slist) { + pid = strtol(pos->s, &end_ptr, 10); + + if (pid == INT_MIN || pid == INT_MAX || + (*end_ptr != '\0' && *end_ptr != ',')) + goto out_free_threads; + + if (pid == prev_pid) + continue; + + sprintf(name, "/proc/%d/task", pid); + items = scandir(name, &namelist, filter, NULL); + if (items <= 0) + goto out_free_threads; + + total_tasks += items; + nt = realloc(threads, (sizeof(*threads) + + sizeof(pid_t) * total_tasks)); + if (nt == NULL) + goto out_free_threads; + + threads = nt; + + if (threads) { + for (i = 0; i < items; i++) + threads->map[j++] = atoi(namelist[i]->d_name); + threads->nr = total_tasks; + } + + for (i = 0; i < items; i++) + free(namelist[i]); + free(namelist); + + if (!threads) + break; + } + +out: + strlist__delete(slist); + return threads; + +out_free_threads: + free(threads); + threads = NULL; + goto out; +} + +static struct thread_map *thread_map__new_by_tid_str(const char *tid_str) +{ + struct thread_map *threads = NULL, *nt; + int ntasks = 0; + pid_t tid, prev_tid = INT_MAX; + char *end_ptr; + struct str_node *pos; + struct strlist *slist; + + /* perf-stat expects threads to be generated even if tid not given */ + if (!tid_str) { + threads = malloc(sizeof(*threads) + sizeof(pid_t)); + if (threads != NULL) { + threads->map[1] = -1; + threads->nr = 1; + } + return threads; + } + + slist = strlist__new(false, tid_str); + if (!slist) + return NULL; + + strlist__for_each(pos, slist) { + tid = strtol(pos->s, &end_ptr, 10); + + if (tid == INT_MIN || tid == INT_MAX || + (*end_ptr != '\0' && *end_ptr != ',')) + goto out_free_threads; + + if (tid == prev_tid) + continue; + + ntasks++; + nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks); + + if (nt == NULL) + goto out_free_threads; + + threads = nt; + threads->map[ntasks - 1] = tid; + threads->nr = ntasks; + } +out: + return threads; + +out_free_threads: + free(threads); + threads = NULL; + goto out; +} + +struct thread_map *thread_map__new_str(const char *pid, const char *tid, + uid_t uid) +{ + if (pid) + return thread_map__new_by_pid_str(pid); + + if (!tid && uid != UINT_MAX) + return thread_map__new_by_uid(uid); + + return thread_map__new_by_tid_str(tid); +} + void thread_map__delete(struct thread_map *threads) { free(threads); diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h index c75ddba..7da80f1 100644 --- a/tools/perf/util/thread_map.h +++ b/tools/perf/util/thread_map.h @@ -13,6 +13,10 @@ struct thread_map *thread_map__new_by_pid(pid_t pid); struct thread_map *thread_map__new_by_tid(pid_t tid); struct thread_map *thread_map__new_by_uid(uid_t uid); struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); + +struct thread_map *thread_map__new_str(const char *pid, + const char *tid, uid_t uid); + void thread_map__delete(struct thread_map *threads); size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index e4370ca..09fe579 100644 --- 
a/tools/perf/util/top.c +++ b/tools/perf/util/top.c @@ -69,11 +69,11 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) ret += SNPRINTF(bf + ret, size - ret, "], "); - if (top->target_pid != -1) - ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d", + if (top->target_pid) + ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", top->target_pid); - else if (top->target_tid != -1) - ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d", + else if (top->target_tid) + ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", top->target_tid); else if (top->uid_str != NULL) ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", @@ -85,7 +85,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list); else { - if (top->target_tid != -1) + if (top->target_tid) ret += SNPRINTF(bf + ret, size - ret, ")"); else ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index def3e53..49eb848 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -23,7 +23,7 @@ struct perf_top { u64 guest_us_samples, guest_kernel_samples; int print_entries, count_filter, delay_secs; int freq; - pid_t target_pid, target_tid; + const char *target_pid, *target_tid; uid_t uid; bool hide_kernel_symbols, hide_user_symbols, zero; bool system_wide; diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c index d0c0139..52bb07c 100644 --- a/tools/perf/util/usage.c +++ b/tools/perf/util/usage.c @@ -83,7 +83,7 @@ void warning(const char *warn, ...) va_end(params); } -uid_t parse_target_uid(const char *str, pid_t tid, pid_t pid) +uid_t parse_target_uid(const char *str, const char *tid, const char *pid) { struct passwd pwd, *result; char buf[1024]; @@ -91,8 +91,8 @@ uid_t parse_target_uid(const char *str, pid_t tid, pid_t pid) if (str == NULL) return UINT_MAX; - /* CPU and PID are mutually exclusive */ - if (tid > 0 || pid > 0) { + /* UID and PID are mutually exclusive */ + if (tid || pid) { ui__warning("PID/TID switch overriding UID\n"); sleep(1); return UINT_MAX; diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 232d17e..7917b09 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -245,7 +245,7 @@ struct perf_event_attr; void event_attr_init(struct perf_event_attr *attr); -uid_t parse_target_uid(const char *str, pid_t tid, pid_t pid); +uid_t parse_target_uid(const char *str, const char *tid, const char *pid); #define _STR(x) #x #define STR(x) _STR(x) -- cgit v0.10.2 From 0c095715b388d19d7a0b7e8eaceeceb018f5b3d1 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 10 Feb 2012 18:05:04 +0100 Subject: perf top: Don't process samples with no valid machine object The perf sample processing code relies on a valid machine object. Make sure that this path is only entered when such an object exists. A counter for samples where no machine object exists is also introduced to give the user a message about these samples.
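The core of the fix, reduced to a few lines (simplified from the hunks below):

    /* simplified from the patch: count and skip the sample, do not crash */
    if (machine == NULL) {
            ++session->hists.stats.nr_unprocessable_samples;
            return -1;      /* typically a guest sample without 'perf kvm' */
    }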
Reported-by: David Ahern Reported-by: Jason Wang Cc: David Ahern Cc: Ingo Molnar Cc: Jason Wang Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328893505-4115-2-git-send-email-joerg.roedel@amd.com Signed-off-by: Joerg Roedel Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 94d55cb..5a88c0d 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -677,6 +677,12 @@ static void perf_event__process_sample(struct perf_tool *tool, return; } + if (!machine) { + pr_err("%u unprocessable samples recorded.", + top->session->hists.stats.nr_unprocessable_samples++); + return; + } + if (event->header.misc & PERF_RECORD_MISC_EXACT_IP) top->exact_samples++; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 0d48613..48e5acd 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -32,6 +32,7 @@ struct events_stats { u32 nr_unknown_events; u32 nr_invalid_chains; u32 nr_unknown_id; + u32 nr_unprocessable_samples; }; enum hist_column { diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 552c1c5..9f833cf 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -796,6 +796,10 @@ static int perf_session_deliver_event(struct perf_session *session, ++session->hists.stats.nr_unknown_id; return -1; } + if (machine == NULL) { + ++session->hists.stats.nr_unprocessable_samples; + return -1; + } return tool->sample(tool, event, sample, evsel, machine); case PERF_RECORD_MMAP: return tool->mmap(tool, event, sample, machine); @@ -964,6 +968,12 @@ static void perf_session__warn_about_errors(const struct perf_session *session, session->hists.stats.nr_invalid_chains, session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); } + + if (session->hists.stats.nr_unprocessable_samples != 0) { + ui__warning("%u unprocessable samples recorded.\n" + "Do you have a KVM guest running and not using 'perf kvm'?\n", + session->hists.stats.nr_unprocessable_samples); + } } #define session_done() (*(volatile int *)(&session_done)) -- cgit v0.10.2 From c4a7dca92bbb9881a5d678720f1d0c2153499749 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 10 Feb 2012 18:05:05 +0100 Subject: perf tools: Change perf_guest default back to false Setting perf_guest to true by default makes no sense because the perf subcommands cannot set up guest symbol information and thus cannot process any guest samples. The only exception is perf-kvm, which changes the perf_guest value on its own. So change the default for perf_guest back to false. Cc: David Ahern Cc: Ingo Molnar Cc: Jason Wang Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328893505-4115-3-git-send-email-joerg.roedel@amd.com Signed-off-by: Joerg Roedel Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 8131410..fb25d13 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -6,7 +6,7 @@ * XXX We need to find a better place for these things... */ bool perf_host = true; -bool perf_guest = true; +bool perf_guest = false; void event_attr_init(struct perf_event_attr *attr) { -- cgit v0.10.2 From 2cd13b0f7dd3a5df68ff4cd82af2e1f7bc2a43ab Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 10 Feb 2012 10:10:15 +0900 Subject: perf tools: Implement islower/isupper macros in util.h The util.h header provides various ctype macros but lacks those two. Add them.
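The macros added below lean on the ASCII layout, where bit 0x20 is all that separates an upper-case letter from its lower-case form. A standalone illustration of that trick (not part of the patch):

    #include <assert.h>

    int main(void)
    {
            /* 'A' is 0x41 and 'a' is 0x61: they differ only in bit 0x20 */
            assert(('A' | 0x20) == 'a');
            assert(('a' & ~0x20) == 'A');
            return 0;
    }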
Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328836217-9118-1-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 7917b09..0f99f39 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -199,6 +199,8 @@ static inline int has_extension(const char *filename, const char *ext) #undef isalpha #undef isprint #undef isalnum +#undef islower +#undef isupper #undef tolower #undef toupper @@ -219,6 +221,8 @@ extern unsigned char sane_ctype[256]; #define isalpha(x) sane_istest(x,GIT_ALPHA) #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) #define isprint(x) sane_istest(x,GIT_PRINT) +#define islower(x) (sane_istest(x,GIT_ALPHA) && sane_istest(x,0x20)) +#define isupper(x) (sane_istest(x,GIT_ALPHA) && !sane_istest(x,0x20)) #define tolower(x) sane_case((unsigned char)(x), 0x20) #define toupper(x) sane_case((unsigned char)(x), 0) -- cgit v0.10.2 From 3bd2b8d1095d41254d3bdc3d62622c7fd5e143b8 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 10 Feb 2012 10:10:16 +0900 Subject: perf tools: ctype.c only wants util.h The implementation of sane ctype macros only depends on symbols in util.h not cache.h. Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328836217-9118-2-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c index 3507362..aada3ac 100644 --- a/tools/perf/util/ctype.c +++ b/tools/perf/util/ctype.c @@ -3,7 +3,7 @@ * * No surprises, and works with signed and unsigned chars. */ -#include "cache.h" +#include "util.h" enum { S = GIT_SPACE, -- cgit v0.10.2 From e334c726ca774d346cb7c4db487e80953a202b58 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 10 Feb 2012 10:10:17 +0900 Subject: perf tools: Get rid of ctype.h in symbol.c The ctype.h in symbol.c was needed because of isupper(). However we now have it in util.h, it can be changed to use our implementation. Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1328836217-9118-3-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index fc6e12f..5dd83c3 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1,4 +1,3 @@ -#include #include #include #include @@ -12,6 +11,7 @@ #include #include #include "build-id.h" +#include "util.h" #include "debug.h" #include "symbol.h" #include "strlist.h" -- cgit v0.10.2 From 2837609fefbfe8f1248bfce09d9f0b44d5eb713d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 27 Jan 2012 15:34:21 +0100 Subject: perf tools: Remove unused functions from debugfs object Following debugfs object functions are not referenced within the code: int debugfs_valid_entry(const char *path); int debugfs_umount(void); int debugfs_write(const char *entry, const char *value); int debugfs_read(const char *entry, char *buffer, size_t size); void debugfs_force_cleanup(void); int debugfs_make_path(const char *element, char *buffer, int size); Removing them. 
Cc: Corey Ashford Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1327674868-10486-3-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c index ffc35e7..dd8b193 100644 --- a/tools/perf/util/debugfs.c +++ b/tools/perf/util/debugfs.c @@ -15,32 +15,6 @@ static const char *debugfs_known_mountpoints[] = { 0, }; -/* use this to force a umount */ -void debugfs_force_cleanup(void) -{ - debugfs_find_mountpoint(); - debugfs_premounted = 0; - debugfs_umount(); -} - -/* construct a full path to a debugfs element */ -int debugfs_make_path(const char *element, char *buffer, int size) -{ - int len; - - if (strlen(debugfs_mountpoint) == 0) { - buffer[0] = '\0'; - return -1; - } - - len = strlen(debugfs_mountpoint) + strlen(element) + 1; - if (len >= size) - return len+1; - - snprintf(buffer, size-1, "%s/%s", debugfs_mountpoint, element); - return 0; -} - static int debugfs_found; /* find the path to the mounted debugfs */ @@ -97,17 +71,6 @@ int debugfs_valid_mountpoint(const char *debugfs) return 0; } - -int debugfs_valid_entry(const char *path) -{ - struct stat st; - - if (stat(path, &st)) - return -errno; - - return 0; -} - static void debugfs_set_tracing_events_path(const char *mountpoint) { snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", @@ -149,107 +112,3 @@ void debugfs_set_path(const char *mountpoint) snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint); debugfs_set_tracing_events_path(mountpoint); } - -/* umount the debugfs */ - -int debugfs_umount(void) -{ - char umountcmd[128]; - int ret; - - /* if it was already mounted, leave it */ - if (debugfs_premounted) - return 0; - - /* make sure it's a valid mount point */ - ret = debugfs_valid_mountpoint(debugfs_mountpoint); - if (ret) - return ret; - - snprintf(umountcmd, sizeof(umountcmd), - "/bin/umount %s", debugfs_mountpoint); - return system(umountcmd); -} - -int debugfs_write(const char *entry, const char *value) -{ - char path[PATH_MAX + 1]; - int ret, count; - int fd; - - /* construct the path */ - snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry); - - /* verify that it exists */ - ret = debugfs_valid_entry(path); - if (ret) - return ret; - - /* get how many chars we're going to write */ - count = strlen(value); - - /* open the debugfs entry */ - fd = open(path, O_RDWR); - if (fd < 0) - return -errno; - - while (count > 0) { - /* write it */ - ret = write(fd, value, count); - if (ret <= 0) { - if (ret == EAGAIN) - continue; - close(fd); - return -errno; - } - count -= ret; - } - - /* close it */ - close(fd); - - /* return success */ - return 0; -} - -/* - * read a debugfs entry - * returns the number of chars read or a negative errno - */ -int debugfs_read(const char *entry, char *buffer, size_t size) -{ - char path[PATH_MAX + 1]; - int ret; - int fd; - - /* construct the path */ - snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry); - - /* verify that it exists */ - ret = debugfs_valid_entry(path); - if (ret) - return ret; - - /* open the debugfs entry */ - fd = open(path, O_RDONLY); - if (fd < 0) - return -errno; - - do { - /* read it */ - ret = read(fd, buffer, size); - if (ret == 0) { - close(fd); - return EOF; - } - } while (ret < 0 && errno == EAGAIN); - - /* close it */ - close(fd); - - /* make *sure* there's a null character at the end */ - buffer[ret] = '\0'; - - /* return the number of chars read */ - return ret; -} 
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h index 4a878f7..68f3e87 100644 --- a/tools/perf/util/debugfs.h +++ b/tools/perf/util/debugfs.h @@ -3,14 +3,8 @@ const char *debugfs_find_mountpoint(void); int debugfs_valid_mountpoint(const char *debugfs); -int debugfs_valid_entry(const char *path); char *debugfs_mount(const char *mountpoint); -int debugfs_umount(void); void debugfs_set_path(const char *mountpoint); -int debugfs_write(const char *entry, const char *value); -int debugfs_read(const char *entry, char *buffer, size_t size); -void debugfs_force_cleanup(void); -int debugfs_make_path(const char *element, char *buffer, int size); extern char debugfs_mountpoint[]; extern char tracing_events_path[]; -- cgit v0.10.2 From e90fda0635401225ca7c2343bea2f6d279347d10 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 27 Jan 2012 15:34:22 +0100 Subject: perf tools: Add sysfs mountpoint interface Adding sysfs object to provide sysfs mount information in the same way as debugfs object does. The object provides following function: sysfs_find_mountpoint which returns the sysfs mount mount. Cc: Corey Ashford Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1327674868-10486-4-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 8359fa1..e011b50 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -259,6 +259,7 @@ LIB_H += util/callchain.h LIB_H += util/build-id.h LIB_H += util/debug.h LIB_H += util/debugfs.h +LIB_H += util/sysfs.h LIB_H += util/event.h LIB_H += util/evsel.h LIB_H += util/evlist.h @@ -305,6 +306,7 @@ LIB_OBJS += $(OUTPUT)util/build-id.o LIB_OBJS += $(OUTPUT)util/config.o LIB_OBJS += $(OUTPUT)util/ctype.o LIB_OBJS += $(OUTPUT)util/debugfs.o +LIB_OBJS += $(OUTPUT)util/sysfs.o LIB_OBJS += $(OUTPUT)util/environment.o LIB_OBJS += $(OUTPUT)util/event.o LIB_OBJS += $(OUTPUT)util/evlist.o diff --git a/tools/perf/util/sysfs.c b/tools/perf/util/sysfs.c new file mode 100644 index 0000000..48c6902 --- /dev/null +++ b/tools/perf/util/sysfs.c @@ -0,0 +1,60 @@ + +#include "util.h" +#include "sysfs.h" + +static const char * const sysfs_known_mountpoints[] = { + "/sys", + 0, +}; + +static int sysfs_found; +char sysfs_mountpoint[PATH_MAX]; + +static int sysfs_valid_mountpoint(const char *sysfs) +{ + struct statfs st_fs; + + if (statfs(sysfs, &st_fs) < 0) + return -ENOENT; + else if (st_fs.f_type != (long) SYSFS_MAGIC) + return -ENOENT; + + return 0; +} + +const char *sysfs_find_mountpoint(void) +{ + const char * const *ptr; + char type[100]; + FILE *fp; + + if (sysfs_found) + return (const char *) sysfs_mountpoint; + + ptr = sysfs_known_mountpoints; + while (*ptr) { + if (sysfs_valid_mountpoint(*ptr) == 0) { + sysfs_found = 1; + strcpy(sysfs_mountpoint, *ptr); + return sysfs_mountpoint; + } + ptr++; + } + + /* give up and parse /proc/mounts */ + fp = fopen("/proc/mounts", "r"); + if (fp == NULL) + return NULL; + + while (!sysfs_found && + fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", + sysfs_mountpoint, type) == 2) { + + if (strcmp(type, "sysfs") == 0) + sysfs_found = 1; + } + + fclose(fp); + + return sysfs_found ? 
sysfs_mountpoint : NULL; +} diff --git a/tools/perf/util/sysfs.h b/tools/perf/util/sysfs.h new file mode 100644 index 0000000..a813b72 --- /dev/null +++ b/tools/perf/util/sysfs.h @@ -0,0 +1,6 @@ +#ifndef __SYSFS_H__ +#define __SYSFS_H__ + +const char *sysfs_find_mountpoint(void); + +#endif /* __DEBUGFS_H__ */ -- cgit v0.10.2 From 850f8127fa3666737881aecb3b16e8ede85e58f4 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 27 Jan 2012 15:34:23 +0100 Subject: perf tools: Add bitmap_or function into bitmap object Adding an implementation of the bitmap_or function to the bitmap object. It is stolen from the kernel lib/bitmap.o object. It is used in upcoming patches. Cc: Corey Ashford Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1327674868-10486-5-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/bitmap.c b/tools/perf/util/bitmap.c index 5e230ac..0a1adc1 100644 --- a/tools/perf/util/bitmap.c +++ b/tools/perf/util/bitmap.c @@ -19,3 +19,13 @@ int __bitmap_weight(const unsigned long *bitmap, int bits) return w; } + +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] | bitmap2[k]; +} diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h index eda4416..bb162e4 100644 --- a/tools/perf/util/include/linux/bitmap.h +++ b/tools/perf/util/include/linux/bitmap.h @@ -5,6 +5,8 @@ #include int __bitmap_weight(const unsigned long *bitmap, int bits); +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); #define BITMAP_LAST_WORD_MASK(nbits) \ ( \ @@ -32,4 +34,13 @@ static inline int bitmap_weight(const unsigned long *src, int nbits) return __bitmap_weight(src, nbits); } +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 | *src2; + else + __bitmap_or(dst, src1, src2, nbits); +} + #endif /* _PERF_BITOPS_H */ -- cgit v0.10.2 From 08d95bd256277f914e525aeb292da52f15173e7d Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 10 Feb 2012 15:41:55 +0100 Subject: perf tools: Moving code in header.c Needed for later changes. No modified functionality.
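A minimal usage sketch for the bitmap_or() helper added above (the values are illustrative, not from the patch):

    unsigned long a[2] = { 0x1UL, 0x0UL };
    unsigned long b[2] = { 0x0UL, 0x80UL };
    unsigned long dst[2];

    /* dst = a | b, word by word; 128 is the width of the bitmaps in bits */
    bitmap_or(dst, a, b, 128);
    /* now dst[0] == 0x1UL and dst[1] == 0x80UL */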
Cc: Ingo Molnar Link: http://lkml.kernel.org/r/1328884916-5901-1-git-send-email-robert.richter@amd.com Signed-off-by: Robert Richter Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 6f4187d..5bb2d75 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1316,6 +1316,156 @@ static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) free(str); } +static int __event_process_build_id(struct build_id_event *bev, + char *filename, + struct perf_session *session) +{ + int err = -1; + struct list_head *head; + struct machine *machine; + u16 misc; + struct dso *dso; + enum dso_kernel_type dso_type; + + machine = perf_session__findnew_machine(session, bev->pid); + if (!machine) + goto out; + + misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + + switch (misc) { + case PERF_RECORD_MISC_KERNEL: + dso_type = DSO_TYPE_KERNEL; + head = &machine->kernel_dsos; + break; + case PERF_RECORD_MISC_GUEST_KERNEL: + dso_type = DSO_TYPE_GUEST_KERNEL; + head = &machine->kernel_dsos; + break; + case PERF_RECORD_MISC_USER: + case PERF_RECORD_MISC_GUEST_USER: + dso_type = DSO_TYPE_USER; + head = &machine->user_dsos; + break; + default: + goto out; + } + + dso = __dsos__findnew(head, filename); + if (dso != NULL) { + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + dso__set_build_id(dso, &bev->build_id); + + if (filename[0] == '[') + dso->kernel = dso_type; + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), + sbuild_id); + pr_debug("build id event received for %s: %s\n", + dso->long_name, sbuild_id); + } + + err = 0; +out: + return err; +} + +static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, + int input, u64 offset, u64 size) +{ + struct perf_session *session = container_of(header, struct perf_session, header); + struct { + struct perf_event_header header; + u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; + char filename[0]; + } old_bev; + struct build_id_event bev; + char filename[PATH_MAX]; + u64 limit = offset + size; + + while (offset < limit) { + ssize_t len; + + if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) + return -1; + + if (header->needs_swap) + perf_event_header__bswap(&old_bev.header); + + len = old_bev.header.size - sizeof(old_bev); + if (read(input, filename, len) != len) + return -1; + + bev.header = old_bev.header; + + /* + * As the pid is the missing value, we need to fill + * it properly. The header.misc value give us nice hint. 
+ */ + bev.pid = HOST_KERNEL_ID; + if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || + bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) + bev.pid = DEFAULT_GUEST_KERNEL_ID; + + memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); + __event_process_build_id(&bev, filename, session); + + offset += bev.header.size; + } + + return 0; +} + +static int perf_header__read_build_ids(struct perf_header *header, + int input, u64 offset, u64 size) +{ + struct perf_session *session = container_of(header, struct perf_session, header); + struct build_id_event bev; + char filename[PATH_MAX]; + u64 limit = offset + size, orig_offset = offset; + int err = -1; + + while (offset < limit) { + ssize_t len; + + if (read(input, &bev, sizeof(bev)) != sizeof(bev)) + goto out; + + if (header->needs_swap) + perf_event_header__bswap(&bev.header); + + len = bev.header.size - sizeof(bev); + if (read(input, filename, len) != len) + goto out; + /* + * The a1645ce1 changeset: + * + * "perf: 'perf kvm' tool for monitoring guest performance from host" + * + * Added a field to struct build_id_event that broke the file + * format. + * + * Since the kernel build-id is the first entry, process the + * table using the old format if the well known + * '[kernel.kallsyms]' string for the kernel build-id has the + * first 4 characters chopped off (where the pid_t sits). + */ + if (memcmp(filename, "nel.kallsyms]", 13) == 0) { + if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) + return -1; + return perf_header__read_build_ids_abi_quirk(header, input, offset, size); + } + + __event_process_build_id(&bev, filename, session); + + offset += bev.header.size; + } + err = 0; +out: + return err; +} + struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); @@ -1735,156 +1885,6 @@ int perf_file_header__read(struct perf_file_header *header, return 0; } -static int __event_process_build_id(struct build_id_event *bev, - char *filename, - struct perf_session *session) -{ - int err = -1; - struct list_head *head; - struct machine *machine; - u16 misc; - struct dso *dso; - enum dso_kernel_type dso_type; - - machine = perf_session__findnew_machine(session, bev->pid); - if (!machine) - goto out; - - misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - switch (misc) { - case PERF_RECORD_MISC_KERNEL: - dso_type = DSO_TYPE_KERNEL; - head = &machine->kernel_dsos; - break; - case PERF_RECORD_MISC_GUEST_KERNEL: - dso_type = DSO_TYPE_GUEST_KERNEL; - head = &machine->kernel_dsos; - break; - case PERF_RECORD_MISC_USER: - case PERF_RECORD_MISC_GUEST_USER: - dso_type = DSO_TYPE_USER; - head = &machine->user_dsos; - break; - default: - goto out; - } - - dso = __dsos__findnew(head, filename); - if (dso != NULL) { - char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - - dso__set_build_id(dso, &bev->build_id); - - if (filename[0] == '[') - dso->kernel = dso_type; - - build_id__sprintf(dso->build_id, sizeof(dso->build_id), - sbuild_id); - pr_debug("build id event received for %s: %s\n", - dso->long_name, sbuild_id); - } - - err = 0; -out: - return err; -} - -static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, - int input, u64 offset, u64 size) -{ - struct perf_session *session = container_of(header, struct perf_session, header); - struct { - struct perf_event_header header; - u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; - char filename[0]; - } old_bev; - struct build_id_event bev; - char filename[PATH_MAX]; - u64 limit = offset + 
size; - - while (offset < limit) { - ssize_t len; - - if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) - return -1; - - if (header->needs_swap) - perf_event_header__bswap(&old_bev.header); - - len = old_bev.header.size - sizeof(old_bev); - if (read(input, filename, len) != len) - return -1; - - bev.header = old_bev.header; - - /* - * As the pid is the missing value, we need to fill - * it properly. The header.misc value give us nice hint. - */ - bev.pid = HOST_KERNEL_ID; - if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || - bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) - bev.pid = DEFAULT_GUEST_KERNEL_ID; - - memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); - __event_process_build_id(&bev, filename, session); - - offset += bev.header.size; - } - - return 0; -} - -static int perf_header__read_build_ids(struct perf_header *header, - int input, u64 offset, u64 size) -{ - struct perf_session *session = container_of(header, struct perf_session, header); - struct build_id_event bev; - char filename[PATH_MAX]; - u64 limit = offset + size, orig_offset = offset; - int err = -1; - - while (offset < limit) { - ssize_t len; - - if (read(input, &bev, sizeof(bev)) != sizeof(bev)) - goto out; - - if (header->needs_swap) - perf_event_header__bswap(&bev.header); - - len = bev.header.size - sizeof(bev); - if (read(input, filename, len) != len) - goto out; - /* - * The a1645ce1 changeset: - * - * "perf: 'perf kvm' tool for monitoring guest performance from host" - * - * Added a field to struct build_id_event that broke the file - * format. - * - * Since the kernel build-id is the first entry, process the - * table using the old format if the well known - * '[kernel.kallsyms]' string for the kernel build-id has the - * first 4 characters chopped off (where the pid_t sits). - */ - if (memcmp(filename, "nel.kallsyms]", 13) == 0) { - if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) - return -1; - return perf_header__read_build_ids_abi_quirk(header, input, offset, size); - } - - __event_process_build_id(&bev, filename, session); - - offset += bev.header.size; - } - err = 0; -out: - return err; -} - static int perf_file_section__process(struct perf_file_section *section, struct perf_header *ph, int feat, int fd, void *data __used) -- cgit v0.10.2 From f1c67db7e351bf9fd328e368ba045cbeca11d513 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 10 Feb 2012 15:41:56 +0100 Subject: perf tools: Factor out feature op to process header sections There is individual code for each feature to process header sections. Adding a function pointer .process to struct feature_ops for keeping the implementation in separate functions. Code to process header sections is now a generic function. 
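Boiled down, the ops-table dispatch looks like this (a simplified sketch with hypothetical names, not the full feature_ops structure):

    struct feature_ops_sketch {
            const char *name;
            int (*process)(int fd);         /* optional per-feature handler */
    };

    static int process_build_id_sketch(int fd) { (void)fd; return 0; }

    static const struct feature_ops_sketch ops[] = {
            { .name = "build_id", .process = process_build_id_sketch },
            { .name = "hostname" },         /* no .process: feature is skipped */
    };

    static int process_feature(int feat, int fd)
    {
            if (!ops[feat].process)
                    return 0;
            return ops[feat].process(fd);
    }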
Cc: Ingo Molnar Link: http://lkml.kernel.org/r/1328884916-5901-2-git-send-email-robert.richter@amd.com Signed-off-by: Robert Richter Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 5bb2d75..9f867d9 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1466,25 +1466,48 @@ out: return err; } +static int process_trace_info(struct perf_file_section *section __unused, + struct perf_header *ph __unused, + int feat __unused, int fd) +{ + trace_report(fd, false); + return 0; +} + +static int process_build_id(struct perf_file_section *section, + struct perf_header *ph, + int feat __unused, int fd) +{ + if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) + pr_debug("Failed to read buildids, continuing...\n"); + return 0; +} + struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); + int (*process)(struct perf_file_section *section, + struct perf_header *h, int feat, int fd); const char *name; bool full_only; }; #define FEAT_OPA(n, func) \ [n] = { .name = #n, .write = write_##func, .print = print_##func } +#define FEAT_OPP(n, func) \ + [n] = { .name = #n, .write = write_##func, .print = print_##func, \ + .process = process_##func } #define FEAT_OPF(n, func) \ - [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true } + [n] = { .name = #n, .write = write_##func, .print = print_##func, \ + .full_only = true } /* feature_ops not implemented: */ #define print_trace_info NULL #define print_build_id NULL static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { - FEAT_OPA(HEADER_TRACE_INFO, trace_info), - FEAT_OPA(HEADER_BUILD_ID, build_id), + FEAT_OPP(HEADER_TRACE_INFO, trace_info), + FEAT_OPP(HEADER_BUILD_ID, build_id), FEAT_OPA(HEADER_HOSTNAME, hostname), FEAT_OPA(HEADER_OSRELEASE, osrelease), FEAT_OPA(HEADER_VERSION, version), @@ -1900,19 +1923,10 @@ static int perf_file_section__process(struct perf_file_section *section, return 0; } - switch (feat) { - case HEADER_TRACE_INFO: - trace_report(fd, false); - break; - case HEADER_BUILD_ID: - if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) - pr_debug("Failed to read buildids, continuing...\n"); - break; - default: - break; - } + if (!feat_ops[feat].process) + return 0; - return 0; + return feat_ops[feat].process(section, ph, feat, fd); } static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, -- cgit v0.10.2 From 7e1ccd3804281fc0755eb726b654469c40a96d89 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 16:12:38 +0100 Subject: perf tools: cleanup initialization of attr->size The perf_event_attr size needs to be initialized in all cases because it captures the ABI version. This patch moves the initialization of the field from the perf_event_open() syscall stub to its proper location in the event_attr_init(). 
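The point of the move is that every path which fills in a perf_event_attr now records the structure size, which the kernel uses to tell which ABI revision user space was built against. A hedged user-space sketch of the convention (setup_attr is a hypothetical helper):

    #include <string.h>
    #include <linux/perf_event.h>

    static void setup_attr(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type = PERF_TYPE_HARDWARE;
            attr->config = PERF_COUNT_HW_CPU_CYCLES;
            /* captures the ABI revision this binary was compiled against */
            attr->size = sizeof(*attr);
    }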
Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20120209151238.GA10272@quad Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/perf.h b/tools/perf/perf.h index deb17db..03a0456 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -167,7 +167,6 @@ sys_perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu, int group_fd, unsigned long flags) { - attr->size = sizeof(*attr); return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); } diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index fb25d13..8109a90 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -14,6 +14,8 @@ void event_attr_init(struct perf_event_attr *attr) attr->exclude_host = 1; if (!perf_guest) attr->exclude_guest = 1; + /* to capture ABI version */ + attr->size = sizeof(*attr); } int mkdir_p(char *path, mode_t mode) -- cgit v0.10.2 From 0c9781280fb672ca09c997df3f14ba506bbdb977 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 14 Feb 2012 14:05:30 -0200 Subject: perf tools: Handle kernels that don't support attr.exclude_{guest,host} Just fall back to resetting those fields, if set, warning the user that that feature is not available. If guest samples appear they will just be discarded because no struct machine will be found and thus the event will be accounted as not handled and dropped, see 0c09571. Reported-by: Namhyung Kim Tested-by: Joerg Roedel Cc: David Ahern Cc: Frederic Weisbecker Cc: Joerg Roedel Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-vuwxig36mzprl5n7nzvnxxsh@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 08ed24b..d6c10e8 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -205,6 +205,9 @@ static void perf_record__open(struct perf_record *rec) if (opts->group && pos != first) group_fd = first->fd; +fallback_missing_features: + if (opts->exclude_guest_missing) + attr->exclude_guest = attr->exclude_host = 0; retry_sample_id: attr->sample_id_all = opts->sample_id_all_avail ? 
1 : 0; try_again: @@ -218,15 +221,23 @@ try_again: } else if (err == ENODEV && opts->cpu_list) { die("No such device - did you specify" " an out-of-range profile CPU?\n"); - } else if (err == EINVAL && opts->sample_id_all_avail) { - /* - * Old kernel, no attr->sample_id_type_all field - */ - opts->sample_id_all_avail = false; - if (!opts->sample_time && !opts->raw_samples && !time_needed) - attr->sample_type &= ~PERF_SAMPLE_TIME; - - goto retry_sample_id; + } else if (err == EINVAL) { + if (!opts->exclude_guest_missing && + (attr->exclude_guest || attr->exclude_host)) { + pr_debug("Old kernel, cannot exclude " + "guest or host samples.\n"); + opts->exclude_guest_missing = true; + goto fallback_missing_features; + } else if (opts->sample_id_all_avail) { + /* + * Old kernel, no attr->sample_id_type_all field + */ + opts->sample_id_all_avail = false; + if (!opts->sample_time && !opts->raw_samples && !time_needed) + attr->sample_type &= ~PERF_SAMPLE_TIME; + + goto retry_sample_id; + } } /* diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 5a88c0d..02e11ff 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -872,6 +872,9 @@ static void perf_top__start_counters(struct perf_top *top) attr->mmap = 1; attr->comm = 1; attr->inherit = top->inherit; +fallback_missing_features: + if (top->exclude_guest_missing) + attr->exclude_guest = attr->exclude_host = 0; retry_sample_id: attr->sample_id_all = top->sample_id_all_avail ? 1 : 0; try_again: @@ -883,12 +886,20 @@ try_again: if (err == EPERM || err == EACCES) { ui__error_paranoid(); goto out_err; - } else if (err == EINVAL && top->sample_id_all_avail) { - /* - * Old kernel, no attr->sample_id_type_all field - */ - top->sample_id_all_avail = false; - goto retry_sample_id; + } else if (err == EINVAL) { + if (!top->exclude_guest_missing && + (attr->exclude_guest || attr->exclude_host)) { + pr_debug("Old kernel, cannot exclude " + "guest or host samples.\n"); + top->exclude_guest_missing = true; + goto fallback_missing_features; + } else if (top->sample_id_all_avail) { + /* + * Old kernel, no attr->sample_id_type_all field + */ + top->sample_id_all_avail = false; + goto retry_sample_id; + } } /* * If it's cycles then fall back to hrtimer diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 03a0456..8b9c436 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -199,6 +199,7 @@ struct perf_record_opts { bool sample_address; bool sample_time; bool sample_id_all_avail; + bool exclude_guest_missing; bool system_wide; bool period; unsigned int freq; diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 49eb848..7dea891 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -35,6 +35,7 @@ struct perf_top { bool inherit; bool group; bool sample_id_all_avail; + bool exclude_guest_missing; bool dump_symtab; const char *cpu_list; struct hist_entry *sym_filter_entry; -- cgit v0.10.2 From 808e122630d45a7f036d25582474d70548a87e2c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 14 Feb 2012 14:18:57 -0200 Subject: perf tools: Invert the sample_id_all logic Instead of requiring that users of perf_record_opts set .sample_id_all_avail to true, just invert the logic, using .sample_id_all_missing, which doesn't need to be explicitly initialized since gcc will zero members omitted in a struct initialization. Just like the newly introduced .exclude_{guest,host} feature test.
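The inversion leans on a C guarantee: members omitted from an initializer of an object with static storage duration are zeroed, so a *_missing flag is false until proven otherwise. A reduced illustration (struct fields trimmed to the relevant ones):

	#include <stdbool.h>

	struct perf_record_opts {
		bool sample_id_all_missing;
		bool exclude_guest_missing;
		unsigned int freq;
	};

	/* both *_missing flags are implicitly zero (false) */
	static struct perf_record_opts opts = {
		.freq = 1000,
	};
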
Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-ab772uzk78cwybihf0vt7kxw@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index d6c10e8..75d230f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -209,7 +209,7 @@ fallback_missing_features: if (opts->exclude_guest_missing) attr->exclude_guest = attr->exclude_host = 0; retry_sample_id: - attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0; + attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; try_again: if (perf_evsel__open(pos, evlist->cpus, evlist->threads, opts->group, group_fd) < 0) { @@ -228,11 +228,11 @@ try_again: "guest or host samples.\n"); opts->exclude_guest_missing = true; goto fallback_missing_features; - } else if (opts->sample_id_all_avail) { + } else if (!opts->sample_id_all_missing) { /* * Old kernel, no attr->sample_id_type_all field */ - opts->sample_id_all_avail = false; + opts->sample_id_all_missing = true; if (!opts->sample_time && !opts->raw_samples && !time_needed) attr->sample_type &= ~PERF_SAMPLE_TIME; @@ -660,7 +660,6 @@ static struct perf_record record = { .user_freq = UINT_MAX, .user_interval = ULLONG_MAX, .freq = 1000, - .sample_id_all_avail = true, }, .write_mode = WRITE_FORCE, .file_new = true, diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 0f15195..3e087ce 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -1013,7 +1013,6 @@ static int test__PERF_RECORD(void) .no_delay = true, .freq = 10, .mmap_pages = 256, - .sample_id_all_avail = true, }; cpu_set_t *cpu_mask = NULL; size_t cpu_mask_size = 0; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 02e11ff..e3c63ae 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -876,7 +876,7 @@ fallback_missing_features: if (top->exclude_guest_missing) attr->exclude_guest = attr->exclude_host = 0; retry_sample_id: - attr->sample_id_all = top->sample_id_all_avail ? 1 : 0; + attr->sample_id_all = top->sample_id_all_missing ? 
0 : 1; try_again: if (perf_evsel__open(counter, top->evlist->cpus, top->evlist->threads, top->group, @@ -893,11 +893,11 @@ try_again: "guest or host samples.\n"); top->exclude_guest_missing = true; goto fallback_missing_features; - } else if (top->sample_id_all_avail) { + } else if (!top->sample_id_all_missing) { /* * Old kernel, no attr->sample_id_type_all field */ - top->sample_id_all_avail = false; + top->sample_id_all_missing = true; goto retry_sample_id; } } @@ -1122,7 +1122,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) .delay_secs = 2, .uid = UINT_MAX, .freq = 1000, /* 1 KHz */ - .sample_id_all_avail = true, .mmap_pages = 128, .sym_pcnt_filter = 5, }; diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 8b9c436..f0227e9 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -198,7 +198,7 @@ struct perf_record_opts { bool raw_samples; bool sample_address; bool sample_time; - bool sample_id_all_avail; + bool sample_id_all_missing; bool exclude_guest_missing; bool system_wide; bool period; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f910f50..302d49a 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -68,7 +68,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) struct perf_event_attr *attr = &evsel->attr; int track = !evsel->idx; /* only the first counter needs these */ - attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0; + attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; attr->inherit = !opts->no_inherit; attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | @@ -111,7 +111,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) if (opts->period) attr->sample_type |= PERF_SAMPLE_PERIOD; - if (opts->sample_id_all_avail && + if (!opts->sample_id_all_missing && (opts->sample_time || opts->system_wide || !opts->no_inherit || opts->cpu_list)) attr->sample_type |= PERF_SAMPLE_TIME; diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 7dea891..ce61cb2 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -34,7 +34,7 @@ struct perf_top { bool vmlinux_warned; bool inherit; bool group; - bool sample_id_all_avail; + bool sample_id_all_missing; bool exclude_guest_missing; bool dump_symtab; const char *cpu_list; -- cgit v0.10.2 From a5a178e1ae0192e405830f1bba84548992124e88 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Sun, 4 Sep 2011 16:08:59 +0100 Subject: perf tools: Allow expressions in __print_symbolic() fields The __print_symbolic() function takes a sequence of key-value pairs for pretty-printing a constant. The new kvm:kvm_exit print fmt uses the expression: __print_symbolic(..., { 0x040 + 1, "DB excp" }, ...) Currently only atoms are supported and this print fmt fails to parse. This patch adds support for expressions instead of just atoms so that 0x040 + 1 is parsed successfully. Also add arg_num_eval() support for the '+' operator. 
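For context, __print_symbolic() is used inside a tracepoint's print format; a trimmed sketch in the style of the kvm:kvm_exit format quoted above (the field name and first entry are illustrative, not the full kvm_exit definition):

	TP_printk("reason %s",
		  __print_symbolic(__entry->exit_reason,
				   { 0x000,     "EXT excp" },
				   { 0x040 + 1, "DB excp"  }))
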
Acked-by: Steven Rostedt Cc: Ingo Molnar Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1315148939-14313-1-git-send-email-stefanha@linux.vnet.ibm.com Signed-off-by: Stefan Hajnoczi Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index e0a4f65..a4088ce 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1423,6 +1423,11 @@ static long long arg_num_eval(struct print_arg *arg) die("unknown op '%s'", arg->op.op); } break; + case '+': + left = arg_num_eval(arg->op.left); + right = arg_num_eval(arg->op.right); + val = left + right; + break; default: die("unknown op '%s'", arg->op.op); } @@ -1483,6 +1488,13 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) free_token(token); type = process_arg(event, arg, &token); + + if (type == EVENT_OP) + type = process_op(event, arg, &token); + + if (type == EVENT_ERROR) + goto out_free; + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; -- cgit v0.10.2 From dfd3b1e3e8a580bcbf854d46ae0f22c8333e82d8 Mon Sep 17 00:00:00 2001 From: Danny Kukawka Date: Thu, 16 Feb 2012 14:54:19 +0100 Subject: perf tools: Remove duplicated string.h includes tools/perf/util/probe-event.c included 'string.h' twice, remove the duplicate. Acked-by: Masami Hiramatsu Cc: Danny Kukawka Cc: Ingo Molnar Cc: Jovi Zhang Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1329400459-31570-1-git-send-email-danny.kukawka@bisect.de Signed-off-by: Danny Kukawka Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index c1a513e..8379252 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -34,7 +34,6 @@ #include "util.h" #include "event.h" -#include "string.h" #include "strlist.h" #include "debug.h" #include "cache.h" -- cgit v0.10.2 From e404b321dbb2d6e438522b7dce9c1d0c6a8c5275 Mon Sep 17 00:00:00 2001 From: Andrey Vagin Date: Sun, 19 Feb 2012 14:16:07 +0300 Subject: tracing: Don't print an extra separator of flags If __print_flags() is used after another __print_*() function, the temp seq_file buffer will not be empty on entry, and the delimiter will be printed even though there's just one field. We get something like: |S instead of just: S This is because the length of the temp seq buffer is used to determine if the delimiter is printed or not. But this algorithm fails when the seq buffer is not empty on entry, and the delimiter will be printed because it thinks that a previous field was already printed. 
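The underlying fix is the classic "first element" flag for delimiter printing: track whether anything has been emitted yet instead of inspecting the buffer length. A standalone sketch of that pattern:

	#include <stdio.h>

	static void print_flags(unsigned long flags, const char *delim,
				const char *const names[], int n)
	{
		int i, first = 1;

		for (i = 0; i < n; i++) {
			if (!(flags & (1UL << i)))
				continue;
			if (!first && delim)
				fputs(delim, stdout);
			else
				first = 0;
			fputs(names[i], stdout);
		}
	}

	int main(void)
	{
		const char *const names[] = { "S", "R", "D" };

		print_flags(0x5, "|", names, 3);	/* prints "S|D", no leading '|' */
		putchar('\n');
		return 0;
	}
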
Link: http://lkml.kernel.org/r/1329650167-480655-1-git-send-email-avagin@openvz.org Signed-off-by: Andrew Vagin Cc: Ingo Molnar Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 0d6ff35..3efd718 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -300,7 +300,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long mask; const char *str; const char *ret = p->buffer + p->len; - int i; + int i, first = 1; for (i = 0; flag_array[i].name && flags; i++) { @@ -310,8 +310,10 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim, str = flag_array[i].name; flags &= ~mask; - if (p->len && delim) + if (!first && delim) trace_seq_puts(p, delim); + else + first = 0; trace_seq_puts(p, str); } -- cgit v0.10.2 From 5b34926114e39e12005031269613d2b13194aeba Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 20 Feb 2012 20:37:32 -0500 Subject: tracing: Don't use p->len field to determine output in __print_*() functions If more than one __print_*() function is used in a tracepoint (__print_flags(), __print_symbols(), etc), then the temp seq buffer will not be zero on entry. Using the temp seq buffer's length to know if data has been printed or not in the current function is incorrect and may produce incorrect results. Currently, no in-tree tracepoint causes this bug, but new ones may be created. Cc: Andrew Vagin Signed-off-by: Steven Rostedt diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 3efd718..c5a0187 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -319,7 +319,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim, /* check for left over flags */ if (flags) { - if (p->len && delim) + if (!first && delim) trace_seq_puts(p, delim); trace_seq_printf(p, "0x%lx", flags); } @@ -346,7 +346,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, break; } - if (!p->len) + if (ret == (const char *)(p->buffer + p->len)) trace_seq_printf(p, "0x%lx", val); trace_seq_putc(p, 0); @@ -372,7 +372,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, break; } - if (!p->len) + if (ret == (const char *)(p->buffer + p->len)) trace_seq_printf(p, "0x%llx", val); trace_seq_putc(p, 0); -- cgit v0.10.2 From e248491ac283b516958ca9ab62c8e74b6718bca8 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:48 +0100 Subject: ftrace: Add enable/disable ftrace_ops control interface Adding a way to temporarily enable/disable ftrace_ops. The change follows the same way as 'global' ftrace_ops are done. Introducing 2 global ftrace_ops - control_ops and ftrace_control_list which take over all ftrace_ops registered with FTRACE_OPS_FL_CONTROL flag. In addition new per cpu flag called 'disabled' is also added to ftrace_ops to provide the control information for each cpu. When ftrace_ops with FTRACE_OPS_FL_CONTROL is registered, it is set as disabled for all cpus. The ftrace_control_list contains all the registered 'control' ftrace_ops. The control_ops provides function which iterates ftrace_control_list and does the check for 'disabled' flag on current cpu. 
Adding 3 inline functions: ftrace_function_local_disable/ftrace_function_local_enable - enable/disable the ftrace_ops on current cpu ftrace_function_local_disabled - get the ftrace_ops::disabled value for the current cpu Link: http://lkml.kernel.org/r/1329317514-8131-2-git-send-email-jolsa@redhat.com Acked-by: Frederic Weisbecker Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f33fb3b..64a309d 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); +/* + * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are + * set in the flags member. + * + * ENABLED - set/unset when ftrace_ops is registered/unregistered + * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops + * is part of the global tracers sharing the same filter + * via set_ftrace_* debugfs files. + * DYNAMIC - set when ftrace_ops is registered to denote dynamically + * allocated ftrace_ops which need special care + * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops + * can be controlled by the following calls: + * ftrace_function_local_enable + * ftrace_function_local_disable + */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_GLOBAL = 1 << 1, FTRACE_OPS_FL_DYNAMIC = 1 << 2, + FTRACE_OPS_FL_CONTROL = 1 << 3, }; struct ftrace_ops { ftrace_func_t func; struct ftrace_ops *next; unsigned long flags; + int __percpu *disabled; #ifdef CONFIG_DYNAMIC_FTRACE struct ftrace_hash *notrace_hash; struct ftrace_hash *filter_hash; @@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops); int unregister_ftrace_function(struct ftrace_ops *ops); void clear_ftrace_function(void); +/** + * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu + * + * This function enables tracing on current cpu by decreasing + * the per cpu control variable. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. + */ +static inline void ftrace_function_local_enable(struct ftrace_ops *ops) +{ + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + return; + + (*this_cpu_ptr(ops->disabled))--; +} + +/** + * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu + * + * This function disables tracing on current cpu by increasing + * the per cpu control variable. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. + */ +static inline void ftrace_function_local_disable(struct ftrace_ops *ops) +{ + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + return; + + (*this_cpu_ptr(ops->disabled))++; +} + +/** + * ftrace_function_local_disabled - returns ftrace_ops disabled value + * on current cpu + * + * This function returns value of ftrace_ops::disabled on current cpu. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 
+ */ +static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) +{ + WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); + return *this_cpu_ptr(ops->disabled); +} + extern void ftrace_stub(unsigned long a0, unsigned long a1); #else /* !CONFIG_FUNCTION_TRACER */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index d1499e9..f615f97 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -62,6 +62,8 @@ #define FTRACE_HASH_DEFAULT_BITS 10 #define FTRACE_HASH_MAX_BITS 12 +#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) + /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; @@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = { }; static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; +static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; static struct ftrace_ops global_ops; +static struct ftrace_ops control_ops; static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); @@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) } #endif +static void control_ops_disable_all(struct ftrace_ops *ops) +{ + int cpu; + + for_each_possible_cpu(cpu) + *per_cpu_ptr(ops->disabled, cpu) = 1; +} + +static int control_ops_alloc(struct ftrace_ops *ops) +{ + int __percpu *disabled; + + disabled = alloc_percpu(int); + if (!disabled) + return -ENOMEM; + + ops->disabled = disabled; + control_ops_disable_all(ops); + return 0; +} + +static void control_ops_free(struct ftrace_ops *ops) +{ + free_percpu(ops->disabled); +} + static void update_global_ops(void) { ftrace_func_t func; @@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) return 0; } +static void add_ftrace_list_ops(struct ftrace_ops **list, + struct ftrace_ops *main_ops, + struct ftrace_ops *ops) +{ + int first = *list == &ftrace_list_end; + add_ftrace_ops(list, ops); + if (first) + add_ftrace_ops(&ftrace_ops_list, main_ops); +} + +static int remove_ftrace_list_ops(struct ftrace_ops **list, + struct ftrace_ops *main_ops, + struct ftrace_ops *ops) +{ + int ret = remove_ftrace_ops(list, ops); + if (!ret && *list == &ftrace_list_end) + ret = remove_ftrace_ops(&ftrace_ops_list, main_ops); + return ret; +} + static int __register_ftrace_function(struct ftrace_ops *ops) { if (ftrace_disabled) @@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops) if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) return -EBUSY; + /* We don't support both control and global flags set. 
*/ + if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) + return -EINVAL; + if (!core_kernel_data((unsigned long)ops)) ops->flags |= FTRACE_OPS_FL_DYNAMIC; if (ops->flags & FTRACE_OPS_FL_GLOBAL) { - int first = ftrace_global_list == &ftrace_list_end; - add_ftrace_ops(&ftrace_global_list, ops); + add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops); ops->flags |= FTRACE_OPS_FL_ENABLED; - if (first) - add_ftrace_ops(&ftrace_ops_list, &global_ops); + } else if (ops->flags & FTRACE_OPS_FL_CONTROL) { + if (control_ops_alloc(ops)) + return -ENOMEM; + add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); } else add_ftrace_ops(&ftrace_ops_list, ops); @@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) return -EINVAL; if (ops->flags & FTRACE_OPS_FL_GLOBAL) { - ret = remove_ftrace_ops(&ftrace_global_list, ops); - if (!ret && ftrace_global_list == &ftrace_list_end) - ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops); + ret = remove_ftrace_list_ops(&ftrace_global_list, + &global_ops, ops); if (!ret) ops->flags &= ~FTRACE_OPS_FL_ENABLED; + } else if (ops->flags & FTRACE_OPS_FL_CONTROL) { + ret = remove_ftrace_list_ops(&ftrace_control_list, + &control_ops, ops); + if (!ret) { + /* + * The ftrace_ops is now removed from the list, + * so there'll be no new users. We must ensure + * all current users are done before we free + * the control data. + */ + synchronize_sched(); + control_ops_free(ops); + } } else ret = remove_ftrace_ops(&ftrace_ops_list, ops); @@ -3874,6 +3941,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) #endif /* CONFIG_DYNAMIC_FTRACE */ static void +ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) +{ + struct ftrace_ops *op; + + if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) + return; + + /* + * Some of the ops may be dynamically allocated, + * they must be freed after a synchronize_sched(). + */ + preempt_disable_notrace(); + trace_recursion_set(TRACE_CONTROL_BIT); + op = rcu_dereference_raw(ftrace_control_list); + while (op != &ftrace_list_end) { + if (!ftrace_function_local_disabled(op) && + ftrace_ops_test(op, ip)) + op->func(ip, parent_ip); + + op = rcu_dereference_raw(op->next); + }; + trace_recursion_clear(TRACE_CONTROL_BIT); + preempt_enable_notrace(); +} + +static struct ftrace_ops control_ops = { + .func = ftrace_ops_control_func, +}; + +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) { struct ftrace_ops *op; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b93ecba..55c6ea0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -288,6 +288,8 @@ struct tracer { /* for function tracing recursion */ #define TRACE_INTERNAL_BIT (1<<11) #define TRACE_GLOBAL_BIT (1<<12) +#define TRACE_CONTROL_BIT (1<<13) + /* * Abuse of the trace_recursion. * As we need a way to maintain state if we are tracing the function -- cgit v0.10.2 From ceec0b6fc7cd43b38a40c2d40223f9cd0616f0cd Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:49 +0100 Subject: ftrace, perf: Add open/close tracepoint perf registration actions Adding TRACE_REG_PERF_OPEN and TRACE_REG_PERF_CLOSE to differentiate register/unregister from open/close actions. The register/unregister actions are invoked for the first/last tracepoint user when opening/closing the event. The open/close actions are invoked for each tracepoint user when opening/closing the event. 
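In refcounting terms: the register/unregister actions run only for the first and last user of a tracepoint, while the new open/close hooks run for every perf event attached to it. A standalone sketch of that protocol (illustrative names, no locking):

	static int refcount;

	static void do_register(void) { }	/* heavyweight setup, once */
	static void do_unregister(void) { }
	static int  do_open(void *event) { (void)event; return 0; }
	static void do_close(void *event) { (void)event; }

	static int event_init(void *event)
	{
		if (refcount++ == 0)
			do_register();		/* first user only */
		return do_open(event);		/* every user */
	}

	static void event_destroy(void *event)
	{
		do_close(event);		/* every user */
		if (--refcount == 0)
			do_unregister();	/* last user only */
	}
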
Link: http://lkml.kernel.org/r/1329317514-8131-3-git-send-email-jolsa@redhat.com Acked-by: Frederic Weisbecker Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index c3da42d..195e360 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -146,6 +146,8 @@ enum trace_reg { TRACE_REG_UNREGISTER, TRACE_REG_PERF_REGISTER, TRACE_REG_PERF_UNREGISTER, + TRACE_REG_PERF_OPEN, + TRACE_REG_PERF_CLOSE, }; struct ftrace_event_call; @@ -157,7 +159,7 @@ struct ftrace_event_class { void *perf_probe; #endif int (*reg)(struct ftrace_event_call *event, - enum trace_reg type); + enum trace_reg type, void *data); int (*define_fields)(struct ftrace_event_call *); struct list_head *(*get_fields)(struct ftrace_event_call *); struct list_head fields; @@ -165,7 +167,7 @@ struct ftrace_event_class { }; extern int ftrace_event_reg(struct ftrace_event_call *event, - enum trace_reg type); + enum trace_reg type, void *data); enum { TRACE_EVENT_FL_ENABLED_BIT, diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 19a359d..0cfcc37 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -44,23 +44,17 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event, return 0; } -static int perf_trace_event_init(struct ftrace_event_call *tp_event, - struct perf_event *p_event) +static int perf_trace_event_reg(struct ftrace_event_call *tp_event, + struct perf_event *p_event) { struct hlist_head __percpu *list; - int ret; + int ret = -ENOMEM; int cpu; - ret = perf_trace_event_perm(tp_event, p_event); - if (ret) - return ret; - p_event->tp_event = tp_event; if (tp_event->perf_refcount++ > 0) return 0; - ret = -ENOMEM; - list = alloc_percpu(struct hlist_head); if (!list) goto fail; @@ -83,7 +77,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, } } - ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); + ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL); if (ret) goto fail; @@ -108,6 +102,69 @@ fail: return ret; } +static void perf_trace_event_unreg(struct perf_event *p_event) +{ + struct ftrace_event_call *tp_event = p_event->tp_event; + int i; + + if (--tp_event->perf_refcount > 0) + goto out; + + tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL); + + /* + * Ensure our callback won't be called anymore. The buffers + * will be freed after that. 
+ */ + tracepoint_synchronize_unregister(); + + free_percpu(tp_event->perf_events); + tp_event->perf_events = NULL; + + if (!--total_ref_count) { + for (i = 0; i < PERF_NR_CONTEXTS; i++) { + free_percpu(perf_trace_buf[i]); + perf_trace_buf[i] = NULL; + } + } +out: + module_put(tp_event->mod); +} + +static int perf_trace_event_open(struct perf_event *p_event) +{ + struct ftrace_event_call *tp_event = p_event->tp_event; + return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event); +} + +static void perf_trace_event_close(struct perf_event *p_event) +{ + struct ftrace_event_call *tp_event = p_event->tp_event; + tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event); +} + +static int perf_trace_event_init(struct ftrace_event_call *tp_event, + struct perf_event *p_event) +{ + int ret; + + ret = perf_trace_event_perm(tp_event, p_event); + if (ret) + return ret; + + ret = perf_trace_event_reg(tp_event, p_event); + if (ret) + return ret; + + ret = perf_trace_event_open(p_event); + if (ret) { + perf_trace_event_unreg(p_event); + return ret; + } + + return 0; +} + int perf_trace_init(struct perf_event *p_event) { struct ftrace_event_call *tp_event; @@ -130,6 +187,14 @@ int perf_trace_init(struct perf_event *p_event) return ret; } +void perf_trace_destroy(struct perf_event *p_event) +{ + mutex_lock(&event_mutex); + perf_trace_event_close(p_event); + perf_trace_event_unreg(p_event); + mutex_unlock(&event_mutex); +} + int perf_trace_add(struct perf_event *p_event, int flags) { struct ftrace_event_call *tp_event = p_event->tp_event; @@ -154,37 +219,6 @@ void perf_trace_del(struct perf_event *p_event, int flags) hlist_del_rcu(&p_event->hlist_entry); } -void perf_trace_destroy(struct perf_event *p_event) -{ - struct ftrace_event_call *tp_event = p_event->tp_event; - int i; - - mutex_lock(&event_mutex); - if (--tp_event->perf_refcount > 0) - goto out; - - tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); - - /* - * Ensure our callback won't be called anymore. The buffers - * will be freed after that. 
- */ - tracepoint_synchronize_unregister(); - - free_percpu(tp_event->perf_events); - tp_event->perf_events = NULL; - - if (!--total_ref_count) { - for (i = 0; i < PERF_NR_CONTEXTS; i++) { - free_percpu(perf_trace_buf[i]); - perf_trace_buf[i] = NULL; - } - } -out: - module_put(tp_event->mod); - mutex_unlock(&event_mutex); -} - __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, struct pt_regs *regs, int *rctxp) { diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index c212a7f..5138fea 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -147,7 +147,8 @@ int trace_event_raw_init(struct ftrace_event_call *call) } EXPORT_SYMBOL_GPL(trace_event_raw_init); -int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type) +int ftrace_event_reg(struct ftrace_event_call *call, + enum trace_reg type, void *data) { switch (type) { case TRACE_REG_REGISTER: @@ -170,6 +171,9 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type) call->class->perf_probe, call); return 0; + case TRACE_REG_PERF_OPEN: + case TRACE_REG_PERF_CLOSE: + return 0; #endif } return 0; @@ -209,7 +213,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, tracing_stop_cmdline_record(); call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; } - call->class->reg(call, TRACE_REG_UNREGISTER); + call->class->reg(call, TRACE_REG_UNREGISTER, NULL); } break; case 1: @@ -218,7 +222,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, tracing_start_cmdline_record(); call->flags |= TRACE_EVENT_FL_RECORDED_CMD; } - ret = call->class->reg(call, TRACE_REG_REGISTER); + ret = call->class->reg(call, TRACE_REG_REGISTER, NULL); if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 00d527c..5667f89 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1892,7 +1892,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, #endif /* CONFIG_PERF_EVENTS */ static __kprobes -int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) +int kprobe_register(struct ftrace_event_call *event, + enum trace_reg type, void *data) { struct trace_probe *tp = (struct trace_probe *)event->data; @@ -1909,6 +1910,9 @@ int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) case TRACE_REG_PERF_UNREGISTER: disable_trace_probe(tp, TP_FLAG_PROFILE); return 0; + case TRACE_REG_PERF_OPEN: + case TRACE_REG_PERF_CLOSE: + return 0; #endif } return 0; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 4350015..e23515f 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -17,9 +17,9 @@ static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); static int syscall_enter_register(struct ftrace_event_call *event, - enum trace_reg type); + enum trace_reg type, void *data); static int syscall_exit_register(struct ftrace_event_call *event, - enum trace_reg type); + enum trace_reg type, void *data); static int syscall_enter_define_fields(struct ftrace_event_call *call); static int syscall_exit_define_fields(struct ftrace_event_call *call); @@ -649,7 +649,7 @@ void perf_sysexit_disable(struct ftrace_event_call *call) #endif /* CONFIG_PERF_EVENTS */ static int syscall_enter_register(struct ftrace_event_call *event, - enum trace_reg type) + enum trace_reg type, 
void *data) { switch (type) { case TRACE_REG_REGISTER: @@ -664,13 +664,16 @@ static int syscall_enter_register(struct ftrace_event_call *event, case TRACE_REG_PERF_UNREGISTER: perf_sysenter_disable(event); return 0; + case TRACE_REG_PERF_OPEN: + case TRACE_REG_PERF_CLOSE: + return 0; #endif } return 0; } static int syscall_exit_register(struct ftrace_event_call *event, - enum trace_reg type) + enum trace_reg type, void *data) { switch (type) { case TRACE_REG_REGISTER: @@ -685,6 +688,9 @@ static int syscall_exit_register(struct ftrace_event_call *event, case TRACE_REG_PERF_UNREGISTER: perf_sysexit_disable(event); return 0; + case TRACE_REG_PERF_OPEN: + case TRACE_REG_PERF_CLOSE: + return 0; #endif } return 0; -- cgit v0.10.2 From 489c75c3b333dfda4c8d2b7ad1b00e5da024bfa7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:50 +0100 Subject: ftrace, perf: Add add/del tracepoint perf registration actions Adding TRACE_REG_PERF_ADD and TRACE_REG_PERF_DEL to handle perf event schedule in/out actions. The add action is invoked for when the perf event is scheduled in, while the del action is invoked when the event is scheduled out. Link: http://lkml.kernel.org/r/1329317514-8131-4-git-send-email-jolsa@redhat.com Acked-by: Frederic Weisbecker Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 195e360..2bf677c 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -148,6 +148,8 @@ enum trace_reg { TRACE_REG_PERF_UNREGISTER, TRACE_REG_PERF_OPEN, TRACE_REG_PERF_CLOSE, + TRACE_REG_PERF_ADD, + TRACE_REG_PERF_DEL, }; struct ftrace_event_call; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 0cfcc37..d72af0b 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -211,12 +211,14 @@ int perf_trace_add(struct perf_event *p_event, int flags) list = this_cpu_ptr(pcpu_list); hlist_add_head_rcu(&p_event->hlist_entry, list); - return 0; + return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event); } void perf_trace_del(struct perf_event *p_event, int flags) { + struct ftrace_event_call *tp_event = p_event->tp_event; hlist_del_rcu(&p_event->hlist_entry); + tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); } __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 5138fea..079a93a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -173,6 +173,8 @@ int ftrace_event_reg(struct ftrace_event_call *call, return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: + case TRACE_REG_PERF_ADD: + case TRACE_REG_PERF_DEL: return 0; #endif } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 5667f89..580a05e 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1912,6 +1912,8 @@ int kprobe_register(struct ftrace_event_call *event, return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: + case TRACE_REG_PERF_ADD: + case TRACE_REG_PERF_DEL: return 0; #endif } diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index e23515f..96fc733 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -666,6 +666,8 @@ static int syscall_enter_register(struct ftrace_event_call *event, return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: + case TRACE_REG_PERF_ADD: + case TRACE_REG_PERF_DEL: return 0; #endif 
} @@ -690,6 +692,8 @@ static int syscall_exit_register(struct ftrace_event_call *event, return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: + case TRACE_REG_PERF_ADD: + case TRACE_REG_PERF_DEL: return 0; #endif } -- cgit v0.10.2 From e59a0bff3ecf389951e3c9378ddfd00f6448bfaa Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:51 +0100 Subject: ftrace: Add FTRACE_ENTRY_REG macro to allow event registration Adding FTRACE_ENTRY_REG macro so particular ftrace entries can specify a registration function and thus become accessible via perf. This will be used in an upcoming patch for the function tracer. Link: http://lkml.kernel.org/r/1329317514-8131-5-git-send-email-jolsa@redhat.com Acked-by: Frederic Weisbecker Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 55c6ea0..638476a 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -68,6 +68,10 @@ enum trace_type { #undef FTRACE_ENTRY_DUP #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) +#undef FTRACE_ENTRY_REG +#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) + #include "trace_entries.h" /* diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index bbeec31..f74de86 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -18,6 +18,14 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM ftrace +/* + * The FTRACE_ENTRY_REG macro allows ftrace entry to define register + * function and thus become accesible via perf. + */ +#undef FTRACE_ENTRY_REG +#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) + /* not needed for this file */ #undef __field_struct #define __field_struct(type, item) @@ -152,13 +160,14 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ #undef F_printk #define F_printk(fmt, args...) #fmt ", " __stringify(args) -#undef FTRACE_ENTRY -#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \ +#undef FTRACE_ENTRY_REG +#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, regfn)\ \ struct ftrace_event_class event_class_ftrace_##call = { \ .system = __stringify(TRACE_SYSTEM), \ .define_fields = ftrace_define_fields_##call, \ .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ + .reg = regfn, \ }; \ \ struct ftrace_event_call __used event_##call = { \ @@ -170,4 +179,9 @@ struct ftrace_event_call __used \ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; +#undef FTRACE_ENTRY +#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \ + FTRACE_ENTRY_REG(call, struct_name, etype, \ + PARAMS(tstruct), PARAMS(print), NULL) + #include "trace_entries.h" -- cgit v0.10.2 From ced39002f5ea736b716ae233fb68b26d59783912 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:52 +0100 Subject: ftrace, perf: Add support to use function tracepoint in perf Adding perf registration support for the ftrace function event, so it is now possible to register it via the perf interface. The perf_event struct statically contains ftrace_ops as a handle for the function tracer. The function tracer is registered/unregistered in open/close actions. To be efficient, we enable/disable ftrace_ops each time the traced process is scheduled in/out (via TRACE_REG_PERF_(ADD|DEL) handlers). 
This way tracing is enabled only when the process is running. Intentionally using this way instead of the event's hw state PERF_HES_STOPPED, which would not disable the ftrace_ops. It is now possible to use function trace within perf commands like: perf record -e ftrace:function ls perf stat -e ftrace:function ls Allowed only for root. Link: http://lkml.kernel.org/r/1329317514-8131-6-git-send-email-jolsa@redhat.com Acked-by: Frederic Weisbecker Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 412b790..92a056f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -859,6 +859,9 @@ struct perf_event { #ifdef CONFIG_EVENT_TRACING struct ftrace_event_call *tp_event; struct event_filter *filter; +#ifdef CONFIG_FUNCTION_TRACER + struct ftrace_ops ftrace_ops; +#endif #endif #ifdef CONFIG_CGROUP_PERF diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 638476a..76a1c50 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -595,6 +595,8 @@ static inline int ftrace_trace_task(struct task_struct *task) static inline int ftrace_is_dead(void) { return 0; } #endif +int ftrace_event_is_function(struct ftrace_event_call *call); + /* * struct trace_parser - servers for reading the user input separated by spaces * @cont: set if the input is not complete - no final space char was found @@ -832,4 +834,13 @@ extern const char *__stop___trace_bprintk_fmt[]; FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) #include "trace_entries.h" +#ifdef CONFIG_PERF_EVENTS +#ifdef CONFIG_FUNCTION_TRACER +int perf_ftrace_event_register(struct ftrace_event_call *call, + enum trace_reg type, void *data); +#else +#define perf_ftrace_event_register NULL +#endif /* CONFIG_FUNCTION_TRACER */ +#endif /* CONFIG_PERF_EVENTS */ + #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 9336590..47db7ed 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -55,7 +55,7 @@ /* * Function trace entry - function address and parent function address: */ -FTRACE_ENTRY(function, ftrace_entry, +FTRACE_ENTRY_REG(function, ftrace_entry, TRACE_FN, @@ -64,7 +64,9 @@ FTRACE_ENTRY(function, ftrace_entry, __field( unsigned long, parent_ip ) ), - F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip) + F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip), + + perf_ftrace_event_register ); /* Function call entry */ diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index d72af0b..fdeeb5c 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -24,6 +24,11 @@ static int total_ref_count; static int perf_trace_event_perm(struct ftrace_event_call *tp_event, struct perf_event *p_event) { + /* The ftrace function trace is allowed only for root. 
*/ + if (ftrace_event_is_function(tp_event) && + perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) + return -EPERM; + /* No tracing, just counting, so no obvious leak */ if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) return 0; @@ -250,3 +255,84 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, return raw_data; } EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); + +#ifdef CONFIG_FUNCTION_TRACER +static void +perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) +{ + struct ftrace_entry *entry; + struct hlist_head *head; + struct pt_regs regs; + int rctx; + +#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ + sizeof(u64)) - sizeof(u32)) + + BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE); + + perf_fetch_caller_regs(®s); + + entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx); + if (!entry) + return; + + entry->ip = ip; + entry->parent_ip = parent_ip; + + head = this_cpu_ptr(event_function.perf_events); + perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, + 1, ®s, head); + +#undef ENTRY_SIZE +} + +static int perf_ftrace_function_register(struct perf_event *event) +{ + struct ftrace_ops *ops = &event->ftrace_ops; + + ops->flags |= FTRACE_OPS_FL_CONTROL; + ops->func = perf_ftrace_function_call; + return register_ftrace_function(ops); +} + +static int perf_ftrace_function_unregister(struct perf_event *event) +{ + struct ftrace_ops *ops = &event->ftrace_ops; + return unregister_ftrace_function(ops); +} + +static void perf_ftrace_function_enable(struct perf_event *event) +{ + ftrace_function_local_enable(&event->ftrace_ops); +} + +static void perf_ftrace_function_disable(struct perf_event *event) +{ + ftrace_function_local_disable(&event->ftrace_ops); +} + +int perf_ftrace_event_register(struct ftrace_event_call *call, + enum trace_reg type, void *data) +{ + switch (type) { + case TRACE_REG_REGISTER: + case TRACE_REG_UNREGISTER: + break; + case TRACE_REG_PERF_REGISTER: + case TRACE_REG_PERF_UNREGISTER: + return 0; + case TRACE_REG_PERF_OPEN: + return perf_ftrace_function_register(data); + case TRACE_REG_PERF_CLOSE: + return perf_ftrace_function_unregister(data); + case TRACE_REG_PERF_ADD: + perf_ftrace_function_enable(data); + return 0; + case TRACE_REG_PERF_DEL: + perf_ftrace_function_disable(data); + return 0; + } + + return -EINVAL; +} +#endif /* CONFIG_FUNCTION_TRACER */ diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index f74de86..a3dbee6 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -184,4 +184,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; FTRACE_ENTRY_REG(call, struct_name, etype, \ PARAMS(tstruct), PARAMS(print), NULL) +int ftrace_event_is_function(struct ftrace_event_call *call) +{ + return call == &event_function; +} + #include "trace_entries.h" -- cgit v0.10.2 From 02aa3162edaa166a01d193f80ccde890be8b55da Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:53 +0100 Subject: ftrace: Allow to specify filter field type for ftrace events Adding FILTER_TRACE_FN event field type for function tracepoint event, so it can be properly recognized within filtering code. Currently all fields of ftrace subsystem events share the common field type FILTER_OTHER. Since the function trace fields need special care within the filtering code we need to recognize it properly, hence adding the FILTER_TRACE_FN event type. Adding filter parameter to the FTRACE_ENTRY macro, to specify the filter field type for the event. 
Link: http://lkml.kernel.org/r/1329317514-8131-7-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 2bf677c..dd478fc 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -245,6 +245,7 @@ enum { FILTER_STATIC_STRING, FILTER_DYN_STRING, FILTER_PTR_STRING, + FILTER_TRACE_FN, }; #define EVENT_STORAGE_SIZE 128 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 76a1c50..29f93cd 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -56,21 +56,23 @@ enum trace_type { #define F_STRUCT(args...) args #undef FTRACE_ENTRY -#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ - struct struct_name { \ - struct trace_entry ent; \ - tstruct \ +#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ + struct struct_name { \ + struct trace_entry ent; \ + tstruct \ } #undef TP_ARGS #define TP_ARGS(args...) args #undef FTRACE_ENTRY_DUP -#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) +#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter) #undef FTRACE_ENTRY_REG -#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) +#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ + filter, regfn) \ + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ + filter) #include "trace_entries.h" @@ -826,12 +828,13 @@ extern const char *__start___trace_bprintk_fmt[]; extern const char *__stop___trace_bprintk_fmt[]; #undef FTRACE_ENTRY -#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ +#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ extern struct ftrace_event_call \ __attribute__((__aligned__(4))) event_##call; #undef FTRACE_ENTRY_DUP -#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ - FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) +#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ + FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ + filter) #include "trace_entries.h" #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 47db7ed..d91eb05 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -66,6 +66,8 @@ FTRACE_ENTRY_REG(function, ftrace_entry, F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip), + FILTER_TRACE_FN, + perf_ftrace_event_register ); @@ -80,7 +82,9 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry, __field_desc( int, graph_ent, depth ) ), - F_printk("--> %lx (%d)", __entry->func, __entry->depth) + F_printk("--> %lx (%d)", __entry->func, __entry->depth), + + FILTER_OTHER ); /* Function return entry */ @@ -100,7 +104,9 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry, F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d", __entry->func, __entry->depth, __entry->calltime, __entry->rettime, - __entry->depth) + __entry->depth), + + FILTER_OTHER ); /* @@ -129,8 +135,9 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry, F_printk("%u:%u:%u ==> %u:%u:%u [%03u]", __entry->prev_pid, __entry->prev_prio, __entry->prev_state, __entry->next_pid, __entry->next_prio, __entry->next_state, - __entry->next_cpu - ) + __entry->next_cpu), + + FILTER_OTHER ); /* @@ -148,8 +155,9 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry, F_printk("%u:%u:%u ==+ %u:%u:%u [%03u]", __entry->prev_pid, 
__entry->prev_prio, __entry->prev_state, __entry->next_pid, __entry->next_prio, __entry->next_state, - __entry->next_cpu - ) + __entry->next_cpu), + + FILTER_OTHER ); /* @@ -171,7 +179,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry, "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", __entry->caller[0], __entry->caller[1], __entry->caller[2], __entry->caller[3], __entry->caller[4], __entry->caller[5], - __entry->caller[6], __entry->caller[7]) + __entry->caller[6], __entry->caller[7]), + + FILTER_OTHER ); FTRACE_ENTRY(user_stack, userstack_entry, @@ -187,7 +197,9 @@ FTRACE_ENTRY(user_stack, userstack_entry, "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", __entry->caller[0], __entry->caller[1], __entry->caller[2], __entry->caller[3], __entry->caller[4], __entry->caller[5], - __entry->caller[6], __entry->caller[7]) + __entry->caller[6], __entry->caller[7]), + + FILTER_OTHER ); /* @@ -204,7 +216,9 @@ FTRACE_ENTRY(bprint, bprint_entry, ), F_printk("%08lx fmt:%p", - __entry->ip, __entry->fmt) + __entry->ip, __entry->fmt), + + FILTER_OTHER ); FTRACE_ENTRY(print, print_entry, @@ -217,7 +231,9 @@ FTRACE_ENTRY(print, print_entry, ), F_printk("%08lx %s", - __entry->ip, __entry->buf) + __entry->ip, __entry->buf), + + FILTER_OTHER ); FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw, @@ -236,7 +252,9 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw, F_printk("%lx %lx %lx %d %x %x", (unsigned long)__entry->phys, __entry->value, __entry->pc, - __entry->map_id, __entry->opcode, __entry->width) + __entry->map_id, __entry->opcode, __entry->width), + + FILTER_OTHER ); FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, @@ -254,7 +272,9 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, F_printk("%lx %lx %lx %d %x", (unsigned long)__entry->phys, __entry->virt, __entry->len, - __entry->map_id, __entry->opcode) + __entry->map_id, __entry->opcode), + + FILTER_OTHER ); @@ -274,6 +294,8 @@ FTRACE_ENTRY(branch, trace_branch, F_printk("%u:%s:%s (%u)", __entry->line, - __entry->func, __entry->file, __entry->correct) + __entry->func, __entry->file, __entry->correct), + + FILTER_OTHER ); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 76afaee..3da3d0e 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -899,6 +899,11 @@ int filter_assign_type(const char *type) return FILTER_OTHER; } +static bool is_function_field(struct ftrace_event_field *field) +{ + return field->filter_type == FILTER_TRACE_FN; +} + static bool is_string_field(struct ftrace_event_field *field) { return field->filter_type == FILTER_DYN_STRING || @@ -986,7 +991,7 @@ static int init_pred(struct filter_parse_state *ps, fn = filter_pred_strloc; else fn = filter_pred_pchar; - } else { + } else if (!is_function_field(field)) { if (field->is_signed) ret = strict_strtoll(pred->regex.pattern, 0, &val); else diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index a3dbee6..7b46c9b 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -23,8 +23,10 @@ * function and thus become accesible via perf. 
*/ #undef FTRACE_ENTRY_REG -#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) +#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ + filter, regfn) \ + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ + filter) /* not needed for this file */ #undef __field_struct @@ -52,21 +54,22 @@ #define F_printk(fmt, args...) fmt, args #undef FTRACE_ENTRY -#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ -struct ____ftrace_##name { \ - tstruct \ -}; \ -static void __always_unused ____ftrace_check_##name(void) \ -{ \ - struct ____ftrace_##name *__entry = NULL; \ - \ - /* force compile-time check on F_printk() */ \ - printk(print); \ +#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ +struct ____ftrace_##name { \ + tstruct \ +}; \ +static void __always_unused ____ftrace_check_##name(void) \ +{ \ + struct ____ftrace_##name *__entry = NULL; \ + \ + /* force compile-time check on F_printk() */ \ + printk(print); \ } #undef FTRACE_ENTRY_DUP -#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \ - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) +#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print, filter) \ + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ + filter) #include "trace_entries.h" @@ -75,7 +78,7 @@ static void __always_unused ____ftrace_check_##name(void) \ ret = trace_define_field(event_call, #type, #item, \ offsetof(typeof(field), item), \ sizeof(field.item), \ - is_signed_type(type), FILTER_OTHER); \ + is_signed_type(type), filter_type); \ if (ret) \ return ret; @@ -85,7 +88,7 @@ static void __always_unused ____ftrace_check_##name(void) \ offsetof(typeof(field), \ container.item), \ sizeof(field.container.item), \ - is_signed_type(type), FILTER_OTHER); \ + is_signed_type(type), filter_type); \ if (ret) \ return ret; @@ -99,7 +102,7 @@ static void __always_unused ____ftrace_check_##name(void) \ ret = trace_define_field(event_call, event_storage, #item, \ offsetof(typeof(field), item), \ sizeof(field.item), \ - is_signed_type(type), FILTER_OTHER); \ + is_signed_type(type), filter_type); \ mutex_unlock(&event_storage_mutex); \ if (ret) \ return ret; \ @@ -112,7 +115,7 @@ static void __always_unused ____ftrace_check_##name(void) \ offsetof(typeof(field), \ container.item), \ sizeof(field.container.item), \ - is_signed_type(type), FILTER_OTHER); \ + is_signed_type(type), filter_type); \ if (ret) \ return ret; @@ -120,17 +123,18 @@ static void __always_unused ____ftrace_check_##name(void) \ #define __dynamic_array(type, item) \ ret = trace_define_field(event_call, #type, #item, \ offsetof(typeof(field), item), \ - 0, is_signed_type(type), FILTER_OTHER);\ + 0, is_signed_type(type), filter_type);\ if (ret) \ return ret; #undef FTRACE_ENTRY -#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ +#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ int \ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ { \ struct struct_name field; \ int ret; \ + int filter_type = filter; \ \ tstruct; \ \ @@ -161,7 +165,8 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ #define F_printk(fmt, args...) 
#fmt ", " __stringify(args) #undef FTRACE_ENTRY_REG -#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, regfn)\ +#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ + regfn) \ \ struct ftrace_event_class event_class_ftrace_##call = { \ .system = __stringify(TRACE_SYSTEM), \ @@ -180,9 +185,9 @@ struct ftrace_event_call __used \ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; #undef FTRACE_ENTRY -#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \ +#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print, filter) \ FTRACE_ENTRY_REG(call, struct_name, etype, \ - PARAMS(tstruct), PARAMS(print), NULL) + PARAMS(tstruct), PARAMS(print), filter, NULL) int ftrace_event_is_function(struct ftrace_event_call *call) { -- cgit v0.10.2 From 5500fa51199aee770ce53718853732600543619e Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 15 Feb 2012 15:51:54 +0100 Subject: ftrace, perf: Add filter support for function trace event Add support for filtering the function trace event via the perf interface. It is now possible to use the filter interface in the perf tool like: perf record -e ftrace:function --filter="(ip == mm_*)" ls The filter syntax is restricted to the 'ip' field only, and the following operators are accepted: '==' '!=' '||', ending up with filter strings like: ip == f1[, ]f2 ... || ip != f3[, ]f4 ... with comma ',' or space ' ' as a function separator. If the space ' ' is used as a separator, the right side of the assignment needs to be enclosed in double quotes '"', e.g.: perf record -e ftrace:function --filter '(ip == do_execve,sys_*,ext*)' ls perf record -e ftrace:function --filter '(ip == "do_execve,sys_*,ext*")' ls perf record -e ftrace:function --filter '(ip == "do_execve sys_* ext*")' ls The '==' operator adds a trace filter with the same effect as writing to the set_ftrace_filter file. The '!=' operator adds a trace filter with the same effect as writing to the set_ftrace_notrace file. The right side of the '==' and '!=' operators is a list of functions or regexps to be added to the filter, separated by spaces. The '||' operator connects multiple filter definitions together. It is possible to have more than one '==' and '!=' operator within one filter string.
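In kernel terms, each leaf predicate is applied to the event's private ftrace_ops through the existing ftrace filtering API. A minimal sketch of that mapping (illustrative only; the helper name apply_ip_pred is made up here, the real plumbing is ftrace_function_set_regexp() in the patch below):

	/*
	 * Sketch: an "ip == ..." predicate behaves like a write to
	 * set_ftrace_filter, an "ip != ..." predicate like a write to
	 * set_ftrace_notrace, both scoped to the event's own ftrace_ops.
	 */
	static int apply_ip_pred(struct ftrace_ops *ops, int is_match,
				 unsigned char *funcs, int len, int reset)
	{
		if (is_match)
			return ftrace_set_filter(ops, funcs, len, reset);

		return ftrace_set_notrace(ops, funcs, len, reset);
	}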
Link: http://lkml.kernel.org/r/1329317514-8131-8-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 64a309d..72a6cab 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -250,6 +250,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); void ftrace_set_global_filter(unsigned char *buf, int len, int reset); void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); +void ftrace_free_filter(struct ftrace_ops *ops); int register_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd); @@ -380,9 +381,6 @@ extern void ftrace_enable_daemon(void); #else static inline int skip_trace(unsigned long ip) { return 0; } static inline int ftrace_force_update(void) { return 0; } -static inline void ftrace_set_filter(unsigned char *buf, int len, int reset) -{ -} static inline void ftrace_disable_daemon(void) { } static inline void ftrace_enable_daemon(void) { } static inline void ftrace_release_mod(struct module *mod) {} @@ -406,6 +404,9 @@ static inline int ftrace_text_reserved(void *start, void *end) */ #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) +#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) +#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) +#define ftrace_free_filter(ops) do { } while (0) static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f615f97..867bd1d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1186,6 +1186,12 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash) call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); } +void ftrace_free_filter(struct ftrace_ops *ops) +{ + free_ftrace_hash(ops->filter_hash); + free_ftrace_hash(ops->notrace_hash); +} + static struct ftrace_hash *alloc_ftrace_hash(int size_bits) { struct ftrace_hash *hash; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 29f93cd..54faec7 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -776,9 +776,7 @@ struct filter_pred { u64 val; struct regex regex; unsigned short *ops; -#ifdef CONFIG_FTRACE_STARTUP_TEST struct ftrace_event_field *field; -#endif int offset; int not; int op; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index fdeeb5c..fee3752 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -298,7 +298,9 @@ static int perf_ftrace_function_register(struct perf_event *event) static int perf_ftrace_function_unregister(struct perf_event *event) { struct ftrace_ops *ops = &event->ftrace_ops; - return unregister_ftrace_function(ops); + int ret = unregister_ftrace_function(ops); + ftrace_free_filter(ops); + return ret; } static void perf_ftrace_function_enable(struct perf_event *event) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 3da3d0e..431dba8 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -81,6 +81,7 @@ enum { FILT_ERR_TOO_MANY_PREDS, FILT_ERR_MISSING_FIELD, FILT_ERR_INVALID_FILTER, + FILT_ERR_IP_FIELD_ONLY, }; static char *err_text[] = { @@ -96,6 +97,7 @@ static char *err_text[] = { "Too many terms in predicate expression", "Missing field 
name and/or value", "Meaningless filter expression", + "Only 'ip' field is supported for function trace", }; struct opstack_op { @@ -991,7 +993,12 @@ static int init_pred(struct filter_parse_state *ps, fn = filter_pred_strloc; else fn = filter_pred_pchar; - } else if (!is_function_field(field)) { + } else if (is_function_field(field)) { + if (strcmp(field->name, "ip")) { + parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0); + return -EINVAL; + } + } else { if (field->is_signed) ret = strict_strtoll(pred->regex.pattern, 0, &val); else @@ -1338,10 +1345,7 @@ static struct filter_pred *create_pred(struct filter_parse_state *ps, strcpy(pred.regex.pattern, operand2); pred.regex.len = strlen(pred.regex.pattern); - -#ifdef CONFIG_FTRACE_STARTUP_TEST pred.field = field; -#endif return init_pred(ps, field, &pred) ? NULL : &pred; } @@ -1954,6 +1958,148 @@ void ftrace_profile_free_filter(struct perf_event *event) __free_filter(filter); } +struct function_filter_data { + struct ftrace_ops *ops; + int first_filter; + int first_notrace; +}; + +#ifdef CONFIG_FUNCTION_TRACER +static char ** +ftrace_function_filter_re(char *buf, int len, int *count) +{ + char *str, *sep, **re; + + str = kstrndup(buf, len, GFP_KERNEL); + if (!str) + return NULL; + + /* + * The argv_split function takes white space + * as a separator, so convert ',' into spaces. + */ + while ((sep = strchr(str, ','))) + *sep = ' '; + + re = argv_split(GFP_KERNEL, str, count); + kfree(str); + return re; +} + +static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter, + int reset, char *re, int len) +{ + int ret; + + if (filter) + ret = ftrace_set_filter(ops, re, len, reset); + else + ret = ftrace_set_notrace(ops, re, len, reset); + + return ret; +} + +static int __ftrace_function_set_filter(int filter, char *buf, int len, + struct function_filter_data *data) +{ + int i, re_cnt, ret; + int *reset; + char **re; + + reset = filter ? &data->first_filter : &data->first_notrace; + + /* + * The 'ip' field could have multiple filters set, separated + * either by space or comma. We first cut the filter and apply + * all pieces separatelly. + */ + re = ftrace_function_filter_re(buf, len, &re_cnt); + if (!re) + return -EINVAL; + + for (i = 0; i < re_cnt; i++) { + ret = ftrace_function_set_regexp(data->ops, filter, *reset, + re[i], strlen(re[i])); + if (ret) + break; + + if (*reset) + *reset = 0; + } + + argv_free(re); + return ret; +} + +static int ftrace_function_check_pred(struct filter_pred *pred, int leaf) +{ + struct ftrace_event_field *field = pred->field; + + if (leaf) { + /* + * Check the leaf predicate for function trace, verify: + * - only '==' and '!=' is used + * - the 'ip' field is used + */ + if ((pred->op != OP_EQ) && (pred->op != OP_NE)) + return -EINVAL; + + if (strcmp(field->name, "ip")) + return -EINVAL; + } else { + /* + * Check the non leaf predicate for function trace, verify: + * - only '||' is used + */ + if (pred->op != OP_OR) + return -EINVAL; + } + + return 0; +} + +static int ftrace_function_set_filter_cb(enum move_type move, + struct filter_pred *pred, + int *err, void *data) +{ + /* Checking the node is valid for function trace. */ + if ((move != MOVE_DOWN) || + (pred->left != FILTER_PRED_INVALID)) { + *err = ftrace_function_check_pred(pred, 0); + } else { + *err = ftrace_function_check_pred(pred, 1); + if (*err) + return WALK_PRED_ABORT; + + *err = __ftrace_function_set_filter(pred->op == OP_EQ, + pred->regex.pattern, + pred->regex.len, + data); + } + + return (*err) ? 
WALK_PRED_ABORT : WALK_PRED_DEFAULT; +} + +static int ftrace_function_set_filter(struct perf_event *event, + struct event_filter *filter) +{ + struct function_filter_data data = { + .first_filter = 1, + .first_notrace = 1, + .ops = &event->ftrace_ops, + }; + + return walk_pred_tree(filter->preds, filter->root, + ftrace_function_set_filter_cb, &data); +} +#else +static int ftrace_function_set_filter(struct perf_event *event, + struct event_filter *filter) +{ + return -ENODEV; +} +#endif /* CONFIG_FUNCTION_TRACER */ + int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str) { @@ -1974,9 +2120,16 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id, goto out_unlock; err = create_filter(call, filter_str, false, &filter); - if (!err) - event->filter = filter; + if (err) + goto free_filter; + + if (ftrace_event_is_function(call)) + err = ftrace_function_set_filter(event, filter); else + event->filter = filter; + +free_filter: + if (err || ftrace_event_is_function(call)) __free_filter(filter); out_unlock: -- cgit v0.10.2 From 6b1bee9035d430c4b4f586df6df4b3f840e89b5b Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 21 Feb 2012 15:54:25 +0100 Subject: perf tools: fix broken perf record -a mode The following commit: b52956c perf tools: Allow multiple threads or processes in record, stat, top introduced a bug in the thread_map code which caused perf record -a to not set up system-wide monitoring properly. $ taskset -c 1 noploop 1000 & $ perf record -a -C 1 sleep 10 $ perf report -D | tail -20 cycles stats: TOTAL events: 4413 MMAP events: 4025 COMM events: 340 SAMPLE events: 48 Here I was expecting about 10,000 samples and not 48. In system-wide mode, the PID passed to perf_event_open() must be -1 and it was 0. That caused the kernel to set up a per-process event on PID:0. Consequently, the number of samples captured does not correspond to the requested measurement. The following one-liner fixes the problem for me with or without -C. I would also suggest changing the malloc() to something that matches the struct definition. thread_map->map[] is declared as int map[] and not pid_t map[]. If map[] can only contain pids, then change the struct definition. Acked-by: David Ahern Cc: David Ahern Cc: Eric Dumazet Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20120221145424.GA6757@quad Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index e15983c..84d9bd78 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -229,7 +229,7 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str) if (!tid_str) { threads = malloc(sizeof(*threads) + sizeof(pid_t)); if (threads != NULL) { - threads->map[1] = -1; + threads->map[0] = -1; threads->nr = 1; } return threads; -- cgit v0.10.2 From fadf0464b83f91ba021a358c0238a0810c0d2a0b Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Tue, 21 Feb 2012 15:02:53 -0500 Subject: jump label: Add a WARN() if jump label key count goes negative The count on a jump label key should never go negative. Add a WARN() to check for this condition.
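For illustration, a hypothetical unbalanced pairing (not taken from this patch) that the new WARN() would catch:

	static struct jump_label_key my_key;	/* enabled count starts at 0 */

	static void buggy_teardown(void)
	{
		/*
		 * No prior jump_label_inc(): enabled drops from 0 to -1,
		 * atomic_dec_and_mutex_lock() returns false, and the new
		 * WARN() fires.
		 */
		jump_label_dec(&my_key);
	}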
Signed-off-by: Jason Baron Cc: Gleb Natapov Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@efficios.com Cc: davem@davemloft.net Cc: ddaney.cavm@gmail.com Cc: a.p.zijlstra@chello.nl Link: http://lkml.kernel.org/r/3c68556121be4d1920417a3fe367da1ec38246b4.1329851692.git.jbaron@redhat.com Signed-off-by: Ingo Molnar diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 01d3b70..ed9654f 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -76,8 +76,11 @@ EXPORT_SYMBOL_GPL(jump_label_inc); static void __jump_label_dec(struct jump_label_key *key, unsigned long rate_limit, struct delayed_work *work) { - if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) + if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { + WARN(atomic_read(&key->enabled) < 0, + "jump label: negative count!\n"); return; + } if (rate_limit) { atomic_inc(&key->enabled); -- cgit v0.10.2 From a746e3cc984b0aa5b620dd07c1a433283b1835cf Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Tue, 21 Feb 2012 15:02:57 -0500 Subject: jump label: Fix compiler warning While cross-compiling on sparc64, I found: kernel/jump_label.c: In function 'jump_label_update': kernel/jump_label.c:447:40: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] Fix by casting to 'unsigned long'. Signed-off-by: Jason Baron Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@efficios.com Cc: davem@davemloft.net Cc: ddaney.cavm@gmail.com Cc: a.p.zijlstra@chello.nl Link: http://lkml.kernel.org/r/08026cbc6df80619cae833ef1ebbbc43efab69ab.1329851692.git.jbaron@redhat.com Signed-off-by: Ingo Molnar diff --git a/kernel/jump_label.c b/kernel/jump_label.c index ed9654f..543782e 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -424,7 +424,7 @@ static void jump_label_update(struct jump_label_key *key, int enable) struct jump_entry *entry = key->entries, *stop = __stop___jump_table; #ifdef CONFIG_MODULES - struct module *mod = __module_address((jump_label_t)key); + struct module *mod = __module_address((unsigned long)key); __jump_label_mod_update(key, enable); -- cgit v0.10.2 From 4ff16c25e2cc48cbe6956e356c38a25ac063a64d Mon Sep 17 00:00:00 2001 From: David Smith Date: Tue, 7 Feb 2012 10:11:05 -0600 Subject: tracepoint, vfs, sched: Add exec() tracepoint Added a minimal exec tracepoint. Exec is an important major event in the life of a task, like fork(), clone() or exit(), all of which we already trace. [ We also do scheduling re-balancing during exec() - so it's useful from a scheduler instrumentation POV as well. ] If you want to watch a task start up, when it gets exec'ed is a good place to start. With the addition of this tracepoint, execs can be monitored and a better picture of general system activity can be obtained. This tracepoint will also enable better process life tracking, allowing you to answer questions like "what process keeps starting up binary X?". This tracepoint can also be useful in ftrace filtering and trigger conditions: e.g. starting or stopping filtering when exec is called.
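As an illustration of a consumer, a minimal (hypothetical) module that attaches a probe to the new tracepoint could look roughly like this, assuming the usual register_trace_<name>() interface generated for data-carrying tracepoints:

	#include <linux/module.h>
	#include <linux/binfmts.h>
	#include <trace/events/sched.h>

	/* Probe callbacks take the private data pointer, then the TP_PROTO args. */
	static void probe_exec(void *data, struct task_struct *p,
			       pid_t old_pid, struct linux_binprm *bprm)
	{
		pr_info("exec of %s: pid=%d old_pid=%d\n",
			bprm->filename, p->pid, old_pid);
	}

	static int __init exec_watch_init(void)
	{
		return register_trace_sched_process_exec(probe_exec, NULL);
	}

	static void __exit exec_watch_exit(void)
	{
		unregister_trace_sched_process_exec(probe_exec, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(exec_watch_init);
	module_exit(exec_watch_exit);
	MODULE_LICENSE("GPL");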
Signed-off-by: David Smith Signed-off-by: Peter Zijlstra Cc: Steven Rostedt Cc: Christoph Hellwig Cc: Al Viro Cc: Andrew Morton Cc: Linus Torvalds Link: http://lkml.kernel.org/r/4F314D19.7030504@redhat.com Signed-off-by: Ingo Molnar diff --git a/fs/exec.c b/fs/exec.c index aeb135c..d0d2080 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -63,6 +63,8 @@ #include #include "internal.h" +#include + int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; unsigned int core_pipe_limit; @@ -1401,9 +1403,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) */ bprm->recursion_depth = depth; if (retval >= 0) { - if (depth == 0) - ptrace_event(PTRACE_EVENT_EXEC, - old_pid); + if (depth == 0) { + trace_sched_process_exec(current, old_pid, bprm); + ptrace_event(PTRACE_EVENT_EXEC, old_pid); + } put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 6ba596b..e61ddfe 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -6,6 +6,7 @@ #include #include +#include /* * Tracepoint for calling kthread_stop, performed to end a kthread: @@ -276,6 +277,32 @@ TRACE_EVENT(sched_process_fork, ); /* + * Tracepoint for exec: + */ +TRACE_EVENT(sched_process_exec, + + TP_PROTO(struct task_struct *p, pid_t old_pid, + struct linux_binprm *bprm), + + TP_ARGS(p, old_pid, bprm), + + TP_STRUCT__entry( + __string( filename, bprm->filename ) + __field( pid_t, pid ) + __field( pid_t, old_pid ) + ), + + TP_fast_assign( + __assign_str(filename, bprm->filename); + __entry->pid = p->pid; + __entry->old_pid = p->pid; + ), + + TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename), + __entry->pid, __entry->old_pid) +); + +/* * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE * adding sched_stat support to SCHED_FIFO/RR would be welcome. */ -- cgit v0.10.2 From 1cfa60dc7d7c7cc774a44eee47ff135a644a1f31 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Tue, 21 Feb 2012 15:03:30 -0500 Subject: static keys: Add docs better explaining the whole 'struct static_key' mechanism Add better documentation for static keys. Signed-off-by: Jason Baron Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@efficios.com Cc: davem@davemloft.net Cc: ddaney.cavm@gmail.com Cc: a.p.zijlstra@chello.nl Link: http://lkml.kernel.org/r/52570e566e5f1914f27b67e4eafb5781b8f9f9db.1329851692.git.jbaron@redhat.com [ Added a 'Summary' section and rewrote it to explain static keys ] Signed-off-by: Ingo Molnar diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt new file mode 100644 index 0000000..d93f3c0 --- /dev/null +++ b/Documentation/static-keys.txt @@ -0,0 +1,286 @@ + Static Keys + ----------- + +By: Jason Baron + +0) Abstract + +Static keys allows the inclusion of seldom used features in +performance-sensitive fast-path kernel code, via a GCC feature and a code +patching technique. A quick example: + + struct static_key key = STATIC_KEY_INIT_FALSE; + + ... + + if (static_key_false(&key)) + do unlikely code + else + do likely code + + ... + static_key_slow_inc(); + ... + static_key_slow_inc(); + ... + +The static_key_false() branch will be generated into the code with as little +impact to the likely code path as possible. + + +1) Motivation + + +Currently, tracepoints are implemented using a conditional branch. The +conditional check requires checking a global variable for each tracepoint. 
+Although the overhead of this check is small, it increases when the memory +cache comes under pressure (memory cache lines for these global variables may +be shared with other memory accesses). As we increase the number of tracepoints +in the kernel this overhead may become more of an issue. In addition, +tracepoints are often dormant (disabled) and provide no direct kernel +functionality. Thus, it is highly desirable to reduce their impact as much as +possible. Although tracepoints are the original motivation for this work, other +kernel code paths should be able to make use of the static keys facility. + + +2) Solution + + +gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label: + +http://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html + +Using the 'asm goto', we can create branches that are either taken or not taken +by default, without the need to check memory. Then, at run-time, we can patch +the branch site to change the branch direction. + +For example, if we have a simple branch that is disabled by default: + + if (static_key_false(&key)) + printk("I am the true branch\n"); + +Thus, by default the 'printk' will not be emitted. And the code generated will +consist of a single atomic 'no-op' instruction (5 bytes on x86), in the +straight-line code path. When the branch is 'flipped', we will patch the +'no-op' in the straight-line codepath with a 'jump' instruction to the +out-of-line true branch. Thus, changing branch direction is expensive but +branch selection is basically 'free'. That is the basic tradeoff of this +optimization. + +This low-level patching mechanism is called 'jump label patching', and it gives +the basis for the static keys facility. + +3) Static key label API, usage and examples: + + +In order to make use of this optimization you must first define a key: + + struct static_key key; + +Which is initialized as: + + struct static_key key = STATIC_KEY_INIT_TRUE; + +or: + + struct static_key key = STATIC_KEY_INIT_FALSE; + +If the key is not initialized, it defaults to false. The 'struct static_key' +must be a 'global'. That is, it can't be allocated on the stack or dynamically +allocated at run-time. + +The key is then used in code as: + + if (static_key_false(&key)) + do unlikely code + else + do likely code + +Or: + + if (static_key_true(&key)) + do likely code + else + do unlikely code + +A key that is initialized via 'STATIC_KEY_INIT_FALSE' must be used in a +'static_key_false()' construct. Likewise, a key initialized via +'STATIC_KEY_INIT_TRUE' must be used in a 'static_key_true()' construct. A +single key can be used in many branches, but all the branches must match the +way that the key has been initialized. + +The branch(es) can then be switched via: + + static_key_slow_inc(&key); + ... + static_key_slow_dec(&key); + +Thus, 'static_key_slow_inc()' means 'make the branch true', and +'static_key_slow_dec()' means 'make the branch false' with appropriate +reference counting. For example, if the key is initialized true, a +static_key_slow_dec() will switch the branch to false. And a subsequent +static_key_slow_inc() will change the branch back to true. Likewise, if the +key is initialized false, a 'static_key_slow_inc()' will change the branch to +true. And then a 'static_key_slow_dec()' will again make the branch false.
+ +An example usage in the kernel is the implementation of tracepoints: + + static inline void trace_##name(proto) \ + { \ + if (static_key_false(&__tracepoint_##name.key)) \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond)); \ + } + +Tracepoints are disabled by default, and can be placed in performance critical +pieces of the kernel. Thus, by using a static key, the tracepoints can have +absolutely minimal impact when not in use. + + +4) Architecture level code patching interface, 'jump labels' + + +There are a few functions and macros that architectures must implement in order +to take advantage of this optimization. If there is no architecture support, we +simply fall back to a traditional, load, test, and jump sequence. + +* select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig + +* #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h + +* __always_inline bool arch_static_branch(struct static_key *key), see: + arch/x86/include/asm/jump_label.h + +* void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type), + see: arch/x86/kernel/jump_label.c + +* __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type), + see: arch/x86/kernel/jump_label.c + + +* struct jump_entry, see: arch/x86/include/asm/jump_label.h + + +5) Static keys / jump label analysis, results (x86_64): + + +As an example, let's add the following branch to 'getppid()', such that the +system call now looks like: + +SYSCALL_DEFINE0(getppid) +{ + int pid; + ++ if (static_key_false(&key)) ++ printk("I am the true branch\n"); + + rcu_read_lock(); + pid = task_tgid_vnr(rcu_dereference(current->real_parent)); + rcu_read_unlock(); + + return pid; +} + +The resulting instructions with jump labels generated by GCC is: + +ffffffff81044290 : +ffffffff81044290: 55 push %rbp +ffffffff81044291: 48 89 e5 mov %rsp,%rbp +ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 +ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax +ffffffff810442a0: 00 00 +ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax +ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax +ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi +ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 +ffffffff810442bc: 5d pop %rbp +ffffffff810442bd: 48 98 cltq +ffffffff810442bf: c3 retq +ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi +ffffffff810442c7: 31 c0 xor %eax,%eax +ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f +ffffffff810442ce: eb c9 jmp ffffffff81044299 + +Without the jump label optimization it looks like: + +ffffffff810441f0 : +ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 +ffffffff810441f6: 55 push %rbp +ffffffff810441f7: 48 89 e5 mov %rsp,%rbp +ffffffff810441fa: 85 c0 test %eax,%eax +ffffffff810441fc: 75 27 jne ffffffff81044225 +ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax +ffffffff81044205: 00 00 +ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax +ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax +ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi +ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 +ffffffff81044221: 5d pop %rbp +ffffffff81044222: 48 98 cltq +ffffffff81044224: c3 retq +ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi +ffffffff8104422c: 31 c0 xor %eax,%eax +ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 +ffffffff81044233: eb c9 jmp 
ffffffff810441fe +ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1) +ffffffff8104423c: 00 00 00 00 + +Thus, the disabled jump label case adds 'mov', 'test' and 'jne' instructions, +while the jump label case just has a 'no-op' or 'jmp 0'. (The jmp 0 is patched +to a 5 byte atomic no-op instruction at boot-time.) Thus, the disabled jump +label case adds: + +6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 additional bytes. + +If we then include the padding bytes, the jump label code saves 16 total bytes +of instruction memory for this small function. In this case the non-jump label +function is 80 bytes long. Thus, we have saved 20% of the instruction +footprint. We can in fact improve this even further, since the 5-byte no-op +really can be a 2-byte no-op since we can reach the branch with a 2-byte jmp. +However, we have not yet implemented optimal no-op sizes (they are currently +hard-coded). + +Since there are a number of static key API uses in the scheduler paths, +'pipe-test' (also known as 'perf bench sched pipe') can be used to show the +performance improvement. Testing done on 3.3.0-rc2: +jump label disabled: + + Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): + + 855.700314 task-clock # 0.534 CPUs utilized ( +- 0.11% ) + 200,003 context-switches # 0.234 M/sec ( +- 0.00% ) + 0 CPU-migrations # 0.000 M/sec ( +- 39.58% ) + 487 page-faults # 0.001 M/sec ( +- 0.02% ) + 1,474,374,262 cycles # 1.723 GHz ( +- 0.17% ) + stalled-cycles-frontend + stalled-cycles-backend + 1,178,049,567 instructions # 0.80 insns per cycle ( +- 0.06% ) + 208,368,926 branches # 243.507 M/sec ( +- 0.06% ) + 5,569,188 branch-misses # 2.67% of all branches ( +- 0.54% ) + + 1.601607384 seconds time elapsed ( +- 0.07% ) + +jump label enabled: + + Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): + + 841.043185 task-clock # 0.533 CPUs utilized ( +- 0.12% ) + 200,004 context-switches # 0.238 M/sec ( +- 0.00% ) + 0 CPU-migrations # 0.000 M/sec ( +- 40.87% ) + 487 page-faults # 0.001 M/sec ( +- 0.05% ) + 1,432,559,428 cycles # 1.703 GHz ( +- 0.18% ) + stalled-cycles-frontend + stalled-cycles-backend + 1,175,363,994 instructions # 0.82 insns per cycle ( +- 0.04% ) + 206,859,359 branches # 245.956 M/sec ( +- 0.04% ) + 4,884,119 branch-misses # 2.36% of all branches ( +- 0.85% ) + + 1.579384366 seconds time elapsed + +The percentage of saved branches is .7%, and we've saved 12% on +'branch-misses'. This is where we would expect to get the most savings, since +this optimization is about reducing the number of branches. In addition, we've +saved .2% on instructions, 2.8% on cycles, and 1.4% on elapsed time. -- cgit v0.10.2 From c5905afb0ee6550b42c49213da1c22d67316c194 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 24 Feb 2012 08:31:31 +0100 Subject: static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]() So here's a boot-tested patch on top of Jason's series that does all the cleanups I talked about and turns jump labels into a more intuitive-to-use facility. It should also address the various misconceptions and confusions that surround jump labels. Typical usage scenarios: #include <linux/static_key.h> struct static_key key = STATIC_KEY_INIT_TRUE; if (static_key_false(&key)) do unlikely code else do likely code Or: if (static_key_true(&key)) do likely code else do unlikely code The static key is modified via: static_key_slow_inc(&key); ...
static_key_slow_dec(&key); The 'slow' prefix makes it abundantly clear that this is an expensive operation. I've updated all in-kernel code to use this everywhere. Note that I (intentionally) have not pushed through the rename blindly through to the lowest levels: the actual jump-label patching arch facility should be named like that, so we want to decouple jump labels from the static-key facility a bit. On non-jump-label enabled architectures static keys default to likely()/unlikely() branches. Signed-off-by: Ingo Molnar Acked-by: Jason Baron Acked-by: Steven Rostedt Cc: a.p.zijlstra@chello.nl Cc: mathieu.desnoyers@efficios.com Cc: davem@davemloft.net Cc: ddaney.cavm@gmail.com Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu Signed-off-by: Ingo Molnar diff --git a/arch/Kconfig b/arch/Kconfig index 4f55c73..5b448a7 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -47,18 +47,29 @@ config KPROBES If in doubt, say "N". config JUMP_LABEL - bool "Optimize trace point call sites" + bool "Optimize very unlikely/likely branches" depends on HAVE_ARCH_JUMP_LABEL help + This option enables a transparent branch optimization that + makes certain almost-always-true or almost-always-false branch + conditions even cheaper to execute within the kernel. + + Certain performance-sensitive kernel code, such as trace points, + scheduler functionality, networking code and KVM have such + branches and include support for this optimization technique. + If it is detected that the compiler has support for "asm goto", - the kernel will compile trace point locations with just a - nop instruction. When trace points are enabled, the nop will - be converted to a jump to the trace function. This technique - lowers overhead and stress on the branch prediction of the - processor. - - On i386, options added to the compiler flags may increase - the size of the kernel slightly. + the kernel will compile such branches with just a nop + instruction. When the condition flag is toggled to true, the + nop will be converted to a jump instruction to execute the + conditional block of instructions. + + This technique lowers overhead and stress on the branch prediction + of the processor and generally makes the kernel faster. The update + of the condition is slower, but those are always very rare. + + ( On 32-bit x86, the necessary options added to the compiler + flags may increase the size of the kernel slightly. 
) config OPTPROBES def_bool y diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index 32551d3..b149b88 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h @@ -281,9 +281,9 @@ paravirt_init_missing_ticks_accounting(int cpu) pv_time_ops.init_missing_ticks_accounting(cpu); } -struct jump_label_key; -extern struct jump_label_key paravirt_steal_enabled; -extern struct jump_label_key paravirt_steal_rq_enabled; +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; static inline int paravirt_do_steal_accounting(unsigned long *new_itm) diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 1008682..1b22f6d 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c @@ -634,8 +634,8 @@ struct pv_irq_ops pv_irq_ops = { * pv_time_ops * time operations */ -struct jump_label_key paravirt_steal_enabled; -struct jump_label_key paravirt_steal_rq_enabled; +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; static int ia64_native_do_steal_accounting(unsigned long *new_itm) diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 1881b31..4d6d77e 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -20,7 +20,7 @@ #define WORD_INSN ".word" #endif -static __always_inline bool arch_static_branch(struct jump_label_key *key) +static __always_inline bool arch_static_branch(struct static_key *key) { asm goto("1:\tnop\n\t" "nop\n\t" diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 938986e..ae098c4 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -17,7 +17,7 @@ #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct jump_label_key *key) +static __always_inline bool arch_static_branch(struct static_key *key) { asm goto("1:\n\t" "nop\n\t" diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 95a6cf2..6c32190 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -13,7 +13,7 @@ #define ASM_ALIGN ".balign 4" #endif -static __always_inline bool arch_static_branch(struct jump_label_key *key) +static __always_inline bool arch_static_branch(struct static_key *key) { asm goto("0: brcl 0,0\n" ".pushsection __jump_table, \"aw\"\n" diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h index fc73a82..5080d16 100644 --- a/arch/sparc/include/asm/jump_label.h +++ b/arch/sparc/include/asm/jump_label.h @@ -7,7 +7,7 @@ #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct jump_label_key *key) +static __always_inline bool arch_static_branch(struct static_key *key) { asm goto("1:\n\t" "nop\n\t" diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index a32b18c..3a16c14 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -9,12 +9,12 @@ #define JUMP_LABEL_NOP_SIZE 5 -#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" +#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" -static __always_inline bool arch_static_branch(struct jump_label_key *key) +static __always_inline bool arch_static_branch(struct static_key *key) { asm goto("1:" - JUMP_LABEL_INITIAL_NOP + 
STATIC_KEY_INITIAL_NOP ".pushsection __jump_table, \"aw\" \n\t" _ASM_ALIGN "\n\t" _ASM_PTR "1b, %l[l_yes], %c0 \n\t" diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index a7d2db9..c0180fd 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -230,9 +230,9 @@ static inline unsigned long long paravirt_sched_clock(void) return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); } -struct jump_label_key; -extern struct jump_label_key paravirt_steal_enabled; -extern struct jump_label_key paravirt_steal_rq_enabled; +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; static inline u64 paravirt_steal_clock(int cpu) { diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index f0c6fd6..694d801 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -438,9 +438,9 @@ void __init kvm_guest_init(void) static __init int activate_jump_labels(void) { if (has_steal_clock) { - jump_label_inc(¶virt_steal_enabled); + static_key_slow_inc(¶virt_steal_enabled); if (steal_acc) - jump_label_inc(¶virt_steal_rq_enabled); + static_key_slow_inc(¶virt_steal_rq_enabled); } return 0; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index d90272e..ada2f99 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -202,8 +202,8 @@ static void native_flush_tlb_single(unsigned long addr) __native_flush_tlb_single(addr); } -struct jump_label_key paravirt_steal_enabled; -struct jump_label_key paravirt_steal_rq_enabled; +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; static u64 native_steal_clock(int cpu) { diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index fe15dcc..ea7b4fd 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu) } static bool mmu_audit; -static struct jump_label_key mmu_audit_key; +static struct static_key mmu_audit_key; static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { @@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { - if (static_branch((&mmu_audit_key))) + if (static_key_false((&mmu_audit_key))) __kvm_mmu_audit(vcpu, point); } @@ -259,7 +259,7 @@ static void mmu_audit_enable(void) if (mmu_audit) return; - jump_label_inc(&mmu_audit_key); + static_key_slow_inc(&mmu_audit_key); mmu_audit = true; } @@ -268,7 +268,7 @@ static void mmu_audit_disable(void) if (!mmu_audit) return; - jump_label_dec(&mmu_audit_key); + static_key_slow_dec(&mmu_audit_key); mmu_audit = false; } diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f7c6958..2172da2 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -9,15 +9,15 @@ * * Jump labels provide an interface to generate dynamic branches using * self-modifying code. Assuming toolchain and architecture support the result - * of a "if (static_branch(&key))" statement is a unconditional branch (which + * of a "if (static_key_false(&key))" statement is a unconditional branch (which * defaults to false - and the true block is placed out of line). * - * However at runtime we can change the 'static' branch target using - * jump_label_{inc,dec}(). These function as a 'reference' count on the key + * However at runtime we can change the branch target using + * static_key_slow_{inc,dec}(). 
These function as a 'reference' count on the key * object and for as long as there are references all branches referring to * that particular key will point to the (out of line) true block. * - * Since this relies on modifying code the jump_label_{inc,dec}() functions + * Since this relies on modifying code the static_key_slow_{inc,dec}() functions * must be considered absolute slow paths (machine wide synchronization etc.). * OTOH, since the affected branches are unconditional their runtime overhead * will be absolutely minimal, esp. in the default (off) case where the total @@ -26,12 +26,26 @@ * * When the control is directly exposed to userspace it is prudent to delay the * decrement to avoid high frequency code modifications which can (and do) - * cause significant performance degradation. Struct jump_label_key_deferred and - * jump_label_dec_deferred() provide for this. + * cause significant performance degradation. Struct static_key_deferred and + * static_key_slow_dec_deferred() provide for this. * * Lacking toolchain and or architecture support, it falls back to a simple * conditional branch. - */ + * + * struct static_key my_key = STATIC_KEY_INIT_TRUE; + * + * if (static_key_true(&my_key)) { + * } + * + * will result in the true case being in-line and starts the key with a single + * reference. Mixing static_key_true() and static_key_false() on the same key is not + * allowed. + * + * Not initializing the key (static data is initialized to 0s anyway) is the + * same as using STATIC_KEY_INIT_FALSE and static_key_false() is + * equivalent with static_branch(). + * +*/ #include #include @@ -39,16 +53,17 @@ #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) -struct jump_label_key { +struct static_key { atomic_t enabled; +/* Set lsb bit to 1 if branch is default true, 0 ot */ struct jump_entry *entries; #ifdef CONFIG_MODULES - struct jump_label_mod *next; + struct static_key_mod *next; #endif }; -struct jump_label_key_deferred { - struct jump_label_key key; +struct static_key_deferred { + struct static_key key; unsigned long timeout; struct delayed_work work; }; @@ -66,13 +81,34 @@ struct module; #ifdef HAVE_JUMP_LABEL -#ifdef CONFIG_MODULES -#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL} -#else -#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL} -#endif +#define JUMP_LABEL_TRUE_BRANCH 1UL + +static +inline struct jump_entry *jump_label_get_entries(struct static_key *key) +{ + return (struct jump_entry *)((unsigned long)key->entries + & ~JUMP_LABEL_TRUE_BRANCH); +} + +static inline bool jump_label_get_branch_default(struct static_key *key) +{ + if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH) + return true; + return false; +} + +static __always_inline bool static_key_false(struct static_key *key) +{ + return arch_static_branch(key); +} -static __always_inline bool static_branch(struct jump_label_key *key) +static __always_inline bool static_key_true(struct static_key *key) +{ + return !static_key_false(key); +} + +/* Deprecated. Please use 'static_key_false() instead. 
*/ +static __always_inline bool static_branch(struct static_key *key) { return arch_static_branch(key); } @@ -88,21 +124,24 @@ extern void arch_jump_label_transform(struct jump_entry *entry, extern void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type); extern int jump_label_text_reserved(void *start, void *end); -extern void jump_label_inc(struct jump_label_key *key); -extern void jump_label_dec(struct jump_label_key *key); -extern void jump_label_dec_deferred(struct jump_label_key_deferred *key); -extern bool jump_label_enabled(struct jump_label_key *key); +extern void static_key_slow_inc(struct static_key *key); +extern void static_key_slow_dec(struct static_key *key); +extern void static_key_slow_dec_deferred(struct static_key_deferred *key); +extern bool static_key_enabled(struct static_key *key); extern void jump_label_apply_nops(struct module *mod); -extern void jump_label_rate_limit(struct jump_label_key_deferred *key, - unsigned long rl); +extern void +jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +#define STATIC_KEY_INIT_TRUE ((struct static_key) \ + { .enabled = ATOMIC_INIT(1), .entries = (void *)1 }) +#define STATIC_KEY_INIT_FALSE ((struct static_key) \ + { .enabled = ATOMIC_INIT(0), .entries = (void *)0 }) #else /* !HAVE_JUMP_LABEL */ #include -#define JUMP_LABEL_INIT {ATOMIC_INIT(0)} - -struct jump_label_key { +struct static_key { atomic_t enabled; }; @@ -110,30 +149,45 @@ static __always_inline void jump_label_init(void) { } -struct jump_label_key_deferred { - struct jump_label_key key; +struct static_key_deferred { + struct static_key key; }; -static __always_inline bool static_branch(struct jump_label_key *key) +static __always_inline bool static_key_false(struct static_key *key) +{ + if (unlikely(atomic_read(&key->enabled)) > 0) + return true; + return false; +} + +static __always_inline bool static_key_true(struct static_key *key) { - if (unlikely(atomic_read(&key->enabled))) + if (likely(atomic_read(&key->enabled)) > 0) return true; return false; } -static inline void jump_label_inc(struct jump_label_key *key) +/* Deprecated. Please use 'static_key_false() instead. 
*/ +static __always_inline bool static_branch(struct static_key *key) +{ + if (unlikely(atomic_read(&key->enabled)) > 0) + return true; + return false; +} + +static inline void static_key_slow_inc(struct static_key *key) { atomic_inc(&key->enabled); } -static inline void jump_label_dec(struct jump_label_key *key) +static inline void static_key_slow_dec(struct static_key *key) { atomic_dec(&key->enabled); } -static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key) +static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { - jump_label_dec(&key->key); + static_key_slow_dec(&key->key); } static inline int jump_label_text_reserved(void *start, void *end) @@ -144,9 +198,9 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -static inline bool jump_label_enabled(struct jump_label_key *key) +static inline bool static_key_enabled(struct static_key *key) { - return !!atomic_read(&key->enabled); + return (atomic_read(&key->enabled) > 0); } static inline int jump_label_apply_nops(struct module *mod) @@ -154,13 +208,20 @@ static inline int jump_label_apply_nops(struct module *mod) return 0; } -static inline void jump_label_rate_limit(struct jump_label_key_deferred *key, +static inline void +jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) { } + +#define STATIC_KEY_INIT_TRUE ((struct static_key) \ + { .enabled = ATOMIC_INIT(1) }) +#define STATIC_KEY_INIT_FALSE ((struct static_key) \ + { .enabled = ATOMIC_INIT(0) }) + #endif /* HAVE_JUMP_LABEL */ -#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), }) -#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), }) +#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE +#define jump_label_enabled static_key_enabled #endif /* _LINUX_JUMP_LABEL_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0eac07c..7dfaae7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -214,8 +214,8 @@ enum { #include #ifdef CONFIG_RPS -#include -extern struct jump_label_key rps_needed; +#include +extern struct static_key rps_needed; #endif struct neighbour; diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b809265..29734be 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #if defined(CONFIG_JUMP_LABEL) -#include -extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +#include +extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) { if (__builtin_constant_p(pf) && __builtin_constant_p(hook)) - return static_branch(&nf_hooks_needed[pf][hook]); + return static_key_false(&nf_hooks_needed[pf][hook]); return !list_empty(&nf_hooks[pf][hook]); } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 412b790..0d21e6f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -514,7 +514,7 @@ struct perf_guest_info_callbacks { #include #include #include -#include +#include #include #include @@ -1038,7 +1038,7 @@ static inline int is_software_event(struct perf_event *event) return event->pmu->task_ctx_nr == perf_sw_context; } -extern struct jump_label_key 
perf_swevent_enabled[PERF_COUNT_SW_MAX]; +extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); @@ -1066,7 +1066,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { struct pt_regs hot_regs; - if (static_branch(&perf_swevent_enabled[event_id])) { + if (static_key_false(&perf_swevent_enabled[event_id])) { if (!regs) { perf_fetch_caller_regs(&hot_regs); regs = &hot_regs; @@ -1075,12 +1075,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) } } -extern struct jump_label_key_deferred perf_sched_events; +extern struct static_key_deferred perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { - if (static_branch(&perf_sched_events.key)) + if (static_key_false(&perf_sched_events.key)) __perf_event_task_sched_in(prev, task); } @@ -1089,7 +1089,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); - if (static_branch(&perf_sched_events.key)) + if (static_key_false(&perf_sched_events.key)) __perf_event_task_sched_out(prev, next); } diff --git a/include/linux/static_key.h b/include/linux/static_key.h new file mode 100644 index 0000000..27bd3f8 --- /dev/null +++ b/include/linux/static_key.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index fc36da9..bd96ecd 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,7 +17,7 @@ #include #include #include -#include +#include struct module; struct tracepoint; @@ -29,7 +29,7 @@ struct tracepoint_func { struct tracepoint { const char *name; /* Tracepoint name */ - struct jump_label_key key; + struct static_key key; void (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; @@ -145,7 +145,7 @@ static inline void tracepoint_synchronize_unregister(void) extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ - if (static_branch(&__tracepoint_##name.key)) \ + if (static_key_false(&__tracepoint_##name.key)) \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ @@ -188,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void) __attribute__((section("__tracepoints_strings"))) = #name; \ struct tracepoint __tracepoint_##name \ __attribute__((section("__tracepoints"))) = \ - { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ + { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ static struct tracepoint * const __tracepoint_ptr_##name __used \ __attribute__((section("__tracepoints_ptrs"))) = \ &__tracepoint_##name; diff --git a/include/net/sock.h b/include/net/sock.h index 91c1c8b..dcde2d9 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -55,7 +55,7 @@ #include #include #include -#include +#include #include #include @@ -924,13 +924,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk) #endif /* SOCK_REFCNT_DEBUG */ #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET) -extern struct jump_label_key memcg_socket_limit_enabled; +extern struct static_key memcg_socket_limit_enabled; static inline struct cg_proto *parent_cg_proto(struct proto *proto, struct cg_proto *cg_proto) { return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); } -#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled) +#define mem_cgroup_sockets_enabled 
static_key_false(&memcg_socket_limit_enabled) #else #define mem_cgroup_sockets_enabled 0 static inline struct cg_proto *parent_cg_proto(struct proto *proto, diff --git a/kernel/events/core.c b/kernel/events/core.c index 7c3b9de..5e0f8bb 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -128,7 +128,7 @@ enum event_type_t { * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ -struct jump_label_key_deferred perf_sched_events __read_mostly; +struct static_key_deferred perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; @@ -2769,7 +2769,7 @@ static void free_event(struct perf_event *event) if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) - jump_label_dec_deferred(&perf_sched_events); + static_key_slow_dec_deferred(&perf_sched_events); if (event->attr.mmap || event->attr.mmap_data) atomic_dec(&nr_mmap_events); if (event->attr.comm) @@ -2780,7 +2780,7 @@ static void free_event(struct perf_event *event) put_callchain_buffers(); if (is_cgroup_event(event)) { atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_dec_deferred(&perf_sched_events); + static_key_slow_dec_deferred(&perf_sched_events); } } @@ -4982,7 +4982,7 @@ fail: return err; } -struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; +struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) { @@ -4990,7 +4990,7 @@ static void sw_perf_event_destroy(struct perf_event *event) WARN_ON(event->parent); - jump_label_dec(&perf_swevent_enabled[event_id]); + static_key_slow_dec(&perf_swevent_enabled[event_id]); swevent_hlist_put(event); } @@ -5020,7 +5020,7 @@ static int perf_swevent_init(struct perf_event *event) if (err) return err; - jump_label_inc(&perf_swevent_enabled[event_id]); + static_key_slow_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; } @@ -5843,7 +5843,7 @@ done: if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) - jump_label_inc(&perf_sched_events.key); + static_key_slow_inc(&perf_sched_events.key); if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) @@ -6081,7 +6081,7 @@ SYSCALL_DEFINE5(perf_event_open, * - that may need work on context switch */ atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_inc(&perf_sched_events.key); + static_key_slow_inc(&perf_sched_events.key); } /* diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 543782e..bf9dcad 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #ifdef HAVE_JUMP_LABEL @@ -29,10 +29,11 @@ void jump_label_unlock(void) mutex_unlock(&jump_label_mutex); } -bool jump_label_enabled(struct jump_label_key *key) +bool static_key_enabled(struct static_key *key) { - return !!atomic_read(&key->enabled); + return (atomic_read(&key->enabled) > 0); } +EXPORT_SYMBOL_GPL(static_key_enabled); static int jump_label_cmp(const void *a, const void *b) { @@ -58,22 +59,26 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); } -static void jump_label_update(struct jump_label_key *key, int enable); +static void jump_label_update(struct static_key *key, int enable); -void jump_label_inc(struct jump_label_key *key) +void static_key_slow_inc(struct static_key *key) { if 
(atomic_inc_not_zero(&key->enabled)) return; jump_label_lock(); - if (atomic_read(&key->enabled) == 0) - jump_label_update(key, JUMP_LABEL_ENABLE); + if (atomic_read(&key->enabled) == 0) { + if (!jump_label_get_branch_default(key)) + jump_label_update(key, JUMP_LABEL_ENABLE); + else + jump_label_update(key, JUMP_LABEL_DISABLE); + } atomic_inc(&key->enabled); jump_label_unlock(); } -EXPORT_SYMBOL_GPL(jump_label_inc); +EXPORT_SYMBOL_GPL(static_key_slow_inc); -static void __jump_label_dec(struct jump_label_key *key, +static void __static_key_slow_dec(struct static_key *key, unsigned long rate_limit, struct delayed_work *work) { if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { @@ -85,32 +90,35 @@ static void __jump_label_dec(struct jump_label_key *key, if (rate_limit) { atomic_inc(&key->enabled); schedule_delayed_work(work, rate_limit); - } else - jump_label_update(key, JUMP_LABEL_DISABLE); - + } else { + if (!jump_label_get_branch_default(key)) + jump_label_update(key, JUMP_LABEL_DISABLE); + else + jump_label_update(key, JUMP_LABEL_ENABLE); + } jump_label_unlock(); } -EXPORT_SYMBOL_GPL(jump_label_dec); static void jump_label_update_timeout(struct work_struct *work) { - struct jump_label_key_deferred *key = - container_of(work, struct jump_label_key_deferred, work.work); - __jump_label_dec(&key->key, 0, NULL); + struct static_key_deferred *key = + container_of(work, struct static_key_deferred, work.work); + __static_key_slow_dec(&key->key, 0, NULL); } -void jump_label_dec(struct jump_label_key *key) +void static_key_slow_dec(struct static_key *key) { - __jump_label_dec(key, 0, NULL); + __static_key_slow_dec(key, 0, NULL); } +EXPORT_SYMBOL_GPL(static_key_slow_dec); -void jump_label_dec_deferred(struct jump_label_key_deferred *key) +void static_key_slow_dec_deferred(struct static_key_deferred *key) { - __jump_label_dec(&key->key, key->timeout, &key->work); + __static_key_slow_dec(&key->key, key->timeout, &key->work); } +EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); - -void jump_label_rate_limit(struct jump_label_key_deferred *key, +void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) { key->timeout = rl; @@ -153,7 +161,7 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry arch_jump_label_transform(entry, type); } -static void __jump_label_update(struct jump_label_key *key, +static void __jump_label_update(struct static_key *key, struct jump_entry *entry, struct jump_entry *stop, int enable) { @@ -170,27 +178,40 @@ static void __jump_label_update(struct jump_label_key *key, } } +static enum jump_label_type jump_label_type(struct static_key *key) +{ + bool true_branch = jump_label_get_branch_default(key); + bool state = static_key_enabled(key); + + if ((!true_branch && state) || (true_branch && !state)) + return JUMP_LABEL_ENABLE; + + return JUMP_LABEL_DISABLE; +} + void __init jump_label_init(void) { struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; - struct jump_label_key *key = NULL; + struct static_key *key = NULL; struct jump_entry *iter; jump_label_lock(); jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { - struct jump_label_key *iterk; + struct static_key *iterk; - iterk = (struct jump_label_key *)(unsigned long)iter->key; - arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? 
- JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); + iterk = (struct static_key *)(unsigned long)iter->key; + arch_jump_label_transform_static(iter, jump_label_type(iterk)); if (iterk == key) continue; key = iterk; - key->entries = iter; + /* + * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. + */ + *((unsigned long *)&key->entries) += (unsigned long)iter; #ifdef CONFIG_MODULES key->next = NULL; #endif @@ -200,8 +221,8 @@ void __init jump_label_init(void) #ifdef CONFIG_MODULES -struct jump_label_mod { - struct jump_label_mod *next; +struct static_key_mod { + struct static_key_mod *next; struct jump_entry *entries; struct module *mod; }; @@ -221,9 +242,9 @@ static int __jump_label_mod_text_reserved(void *start, void *end) start, end); } -static void __jump_label_mod_update(struct jump_label_key *key, int enable) +static void __jump_label_mod_update(struct static_key *key, int enable) { - struct jump_label_mod *mod = key->next; + struct static_key_mod *mod = key->next; while (mod) { struct module *m = mod->mod; @@ -254,11 +275,7 @@ void jump_label_apply_nops(struct module *mod) return; for (iter = iter_start; iter < iter_stop; iter++) { - struct jump_label_key *iterk; - - iterk = (struct jump_label_key *)(unsigned long)iter->key; - arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? - JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); + arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); } } @@ -267,8 +284,8 @@ static int jump_label_add_module(struct module *mod) struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; - struct jump_label_key *key = NULL; - struct jump_label_mod *jlm; + struct static_key *key = NULL; + struct static_key_mod *jlm; /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) @@ -277,28 +294,30 @@ static int jump_label_add_module(struct module *mod) jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { - if (iter->key == (jump_label_t)(unsigned long)key) - continue; + struct static_key *iterk; - key = (struct jump_label_key *)(unsigned long)iter->key; + iterk = (struct static_key *)(unsigned long)iter->key; + if (iterk == key) + continue; + key = iterk; if (__module_address(iter->key) == mod) { - atomic_set(&key->enabled, 0); - key->entries = iter; + /* + * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. 
+ */ + *((unsigned long *)&key->entries) += (unsigned long)iter; key->next = NULL; continue; } - - jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); + jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); if (!jlm) return -ENOMEM; - jlm->mod = mod; jlm->entries = iter; jlm->next = key->next; key->next = jlm; - if (jump_label_enabled(key)) + if (jump_label_type(key) == JUMP_LABEL_ENABLE) __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); } @@ -310,14 +329,14 @@ static void jump_label_del_module(struct module *mod) struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; - struct jump_label_key *key = NULL; - struct jump_label_mod *jlm, **prev; + struct static_key *key = NULL; + struct static_key_mod *jlm, **prev; for (iter = iter_start; iter < iter_stop; iter++) { if (iter->key == (jump_label_t)(unsigned long)key) continue; - key = (struct jump_label_key *)(unsigned long)iter->key; + key = (struct static_key *)(unsigned long)iter->key; if (__module_address(iter->key) == mod) continue; @@ -419,9 +438,10 @@ int jump_label_text_reserved(void *start, void *end) return ret; } -static void jump_label_update(struct jump_label_key *key, int enable) +static void jump_label_update(struct static_key *key, int enable) { - struct jump_entry *entry = key->entries, *stop = __stop___jump_table; + struct jump_entry *stop = __stop___jump_table; + struct jump_entry *entry = jump_label_get_entries(key); #ifdef CONFIG_MODULES struct module *mod = __module_address((unsigned long)key); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5255c9d..112c682 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v) #ifdef HAVE_JUMP_LABEL -#define jump_label_key__true jump_label_key_enabled -#define jump_label_key__false jump_label_key_disabled +#define jump_label_key__true STATIC_KEY_INIT_TRUE +#define jump_label_key__false STATIC_KEY_INIT_FALSE #define SCHED_FEAT(name, enabled) \ jump_label_key__##enabled , -struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = { +struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { #include "features.h" }; @@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = { static void sched_feat_disable(int i) { - if (jump_label_enabled(&sched_feat_keys[i])) - jump_label_dec(&sched_feat_keys[i]); + if (static_key_enabled(&sched_feat_keys[i])) + static_key_slow_dec(&sched_feat_keys[i]); } static void sched_feat_enable(int i) { - if (!jump_label_enabled(&sched_feat_keys[i])) - jump_label_inc(&sched_feat_keys[i]); + if (!static_key_enabled(&sched_feat_keys[i])) + static_key_slow_inc(&sched_feat_keys[i]); } #else static void sched_feat_disable(int i) { }; @@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) delta -= irq_delta; #endif #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - if (static_branch((&paravirt_steal_rq_enabled))) { + if (static_key_false((&paravirt_steal_rq_enabled))) { u64 st; steal = paravirt_steal_clock(cpu_of(rq)); @@ -2756,7 +2756,7 @@ void account_idle_time(cputime_t cputime) static __always_inline bool steal_account_process_tick(void) { #ifdef CONFIG_PARAVIRT - if (static_branch(&paravirt_steal_enabled)) { + if (static_key_false(&paravirt_steal_enabled)) { u64 steal, st = 0; steal = paravirt_steal_clock(smp_processor_id()); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7c6414f..423547a 100644 --- a/kernel/sched/fair.c +++
b/kernel/sched/fair.c @@ -1399,20 +1399,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) #ifdef CONFIG_CFS_BANDWIDTH #ifdef HAVE_JUMP_LABEL -static struct jump_label_key __cfs_bandwidth_used; +static struct static_key __cfs_bandwidth_used; static inline bool cfs_bandwidth_used(void) { - return static_branch(&__cfs_bandwidth_used); + return static_key_false(&__cfs_bandwidth_used); } void account_cfs_bandwidth_used(int enabled, int was_enabled) { /* only need to count groups transitioning between enabled/!enabled */ if (enabled && !was_enabled) - jump_label_inc(&__cfs_bandwidth_used); + static_key_slow_inc(&__cfs_bandwidth_used); else if (!enabled && was_enabled) - jump_label_dec(&__cfs_bandwidth_used); + static_key_slow_dec(&__cfs_bandwidth_used); } #else /* HAVE_JUMP_LABEL */ static bool cfs_bandwidth_used(void) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 98c0c26..b4cd6d8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) * Tunables that become constants when CONFIG_SCHED_DEBUG is off: */ #ifdef CONFIG_SCHED_DEBUG -# include +# include # define const_debug __read_mostly #else # define const_debug const @@ -630,18 +630,18 @@ enum { #undef SCHED_FEAT #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) -static __always_inline bool static_branch__true(struct jump_label_key *key) +static __always_inline bool static_branch__true(struct static_key *key) { - return likely(static_branch(key)); /* Not out of line branch. */ + return static_key_true(key); /* Not out of line branch. */ } -static __always_inline bool static_branch__false(struct jump_label_key *key) +static __always_inline bool static_branch__false(struct static_key *key) { - return unlikely(static_branch(key)); /* Out of line branch. */ + return static_key_false(key); /* Out of line branch. */ } #define SCHED_FEAT(name, enabled) \ -static __always_inline bool static_branch_##name(struct jump_label_key *key) \ +static __always_inline bool static_branch_##name(struct static_key *key) \ { \ return static_branch__##enabled(key); \ } @@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \ #undef SCHED_FEAT -extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR]; +extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index f1539de..d96ba22 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include extern struct tracepoint * const __start___tracepoints_ptrs[]; extern struct tracepoint * const __stop___tracepoints_ptrs[]; @@ -256,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, { WARN_ON(strcmp((*entry)->name, elem->name) != 0); - if (elem->regfunc && !jump_label_enabled(&elem->key) && active) + if (elem->regfunc && !static_key_enabled(&elem->key) && active) elem->regfunc(); - else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) + else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) elem->unregfunc(); /* @@ -269,10 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, * is used. 
*/ rcu_assign_pointer(elem->funcs, (*entry)->funcs); - if (active && !jump_label_enabled(&elem->key)) - jump_label_inc(&elem->key); - else if (!active && jump_label_enabled(&elem->key)) - jump_label_dec(&elem->key); + if (active && !static_key_enabled(&elem->key)) + static_key_slow_inc(&elem->key); + else if (!active && static_key_enabled(&elem->key)) + static_key_slow_dec(&elem->key); } /* @@ -283,11 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, */ static void disable_tracepoint(struct tracepoint *elem) { - if (elem->unregfunc && jump_label_enabled(&elem->key)) + if (elem->unregfunc && static_key_enabled(&elem->key)) elem->unregfunc(); - if (jump_label_enabled(&elem->key)) - jump_label_dec(&elem->key); + if (static_key_enabled(&elem->key)) + static_key_slow_dec(&elem->key); rcu_assign_pointer(elem->funcs, NULL); } diff --git a/net/core/dev.c b/net/core/dev.c index 115dee1..da7ce7f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -134,7 +134,7 @@ #include #include #include -#include +#include #include #include "net-sysfs.h" @@ -1441,11 +1441,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) } EXPORT_SYMBOL(call_netdevice_notifiers); -static struct jump_label_key netstamp_needed __read_mostly; +static struct static_key netstamp_needed __read_mostly; #ifdef HAVE_JUMP_LABEL -/* We are not allowed to call jump_label_dec() from irq context +/* We are not allowed to call static_key_slow_dec() from irq context * If net_disable_timestamp() is called from irq context, defer the - * jump_label_dec() calls. + * static_key_slow_dec() calls. */ static atomic_t netstamp_needed_deferred; #endif @@ -1457,12 +1457,12 @@ void net_enable_timestamp(void) if (deferred) { while (--deferred) - jump_label_dec(&netstamp_needed); + static_key_slow_dec(&netstamp_needed); return; } #endif WARN_ON(in_interrupt()); - jump_label_inc(&netstamp_needed); + static_key_slow_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); @@ -1474,19 +1474,19 @@ void net_disable_timestamp(void) return; } #endif - jump_label_dec(&netstamp_needed); + static_key_slow_dec(&netstamp_needed); } EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { skb->tstamp.tv64 = 0; - if (static_branch(&netstamp_needed)) + if (static_key_false(&netstamp_needed)) __net_timestamp(skb); } #define net_timestamp_check(COND, SKB) \ - if (static_branch(&netstamp_needed)) { \ + if (static_key_false(&netstamp_needed)) { \ if ((COND) && !(SKB)->tstamp.tv64) \ __net_timestamp(SKB); \ } \ @@ -2660,7 +2660,7 @@ EXPORT_SYMBOL(__skb_get_rxhash); struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); -struct jump_label_key rps_needed __read_mostly; +struct static_key rps_needed __read_mostly; static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, @@ -2945,7 +2945,7 @@ int netif_rx(struct sk_buff *skb) trace_netif_rx(skb); #ifdef CONFIG_RPS - if (static_branch(&rps_needed)) { + if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -3309,7 +3309,7 @@ int netif_receive_skb(struct sk_buff *skb) return NET_RX_SUCCESS; #ifdef CONFIG_RPS - if (static_branch(&rps_needed)) { + if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu, ret; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index a1727cd..4955862 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -608,10 +608,10 @@ static ssize_t 
store_rps_map(struct netdev_rx_queue *queue, spin_unlock(&rps_map_lock); if (map) - jump_label_inc(&rps_needed); + static_key_slow_inc(&rps_needed); if (old_map) { kfree_rcu(old_map, rcu); - jump_label_dec(&rps_needed); + static_key_slow_dec(&rps_needed); } free_cpumask_var(mask); return len; } diff --git a/net/core/sock.c b/net/core/sock.c index 3e81fd2..3a4e581 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -111,7 +111,7 @@ #include #include #include -#include +#include #include #include @@ -184,7 +184,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss) static struct lock_class_key af_family_keys[AF_MAX]; static struct lock_class_key af_family_slock_keys[AF_MAX]; -struct jump_label_key memcg_socket_limit_enabled; +struct static_key memcg_socket_limit_enabled; EXPORT_SYMBOL(memcg_socket_limit_enabled); /* diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index d05559d..0c28508 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -69,9 +69,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write, if (sock_table != orig_sock_table) { rcu_assign_pointer(rps_sock_flow_table, sock_table); if (sock_table) - jump_label_inc(&rps_needed); + static_key_slow_inc(&rps_needed); if (orig_sock_table) { - jump_label_dec(&rps_needed); + static_key_slow_dec(&rps_needed); synchronize_rcu(); vfree(orig_sock_table); } diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 4997878..602fb30 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -111,7 +111,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); if (val != RESOURCE_MAX) - jump_label_dec(&memcg_socket_limit_enabled); + static_key_slow_dec(&memcg_socket_limit_enabled); } EXPORT_SYMBOL(tcp_destroy_cgroup); @@ -143,9 +143,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) net->ipv4.sysctl_tcp_mem[i]); if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX) - jump_label_dec(&memcg_socket_limit_enabled); + static_key_slow_dec(&memcg_socket_limit_enabled); else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX) - jump_label_inc(&memcg_socket_limit_enabled); + static_key_slow_inc(&memcg_socket_limit_enabled); return 0; } diff --git a/net/netfilter/core.c b/net/netfilter/core.c index b4e8ff0..e1b7e05 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -56,7 +56,7 @@ struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly; EXPORT_SYMBOL(nf_hooks); #if defined(CONFIG_JUMP_LABEL) -struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; EXPORT_SYMBOL(nf_hooks_needed); #endif @@ -77,7 +77,7 @@ int nf_register_hook(struct nf_hook_ops *reg) list_add_rcu(&reg->list, elem->list.prev); mutex_unlock(&nf_hook_mutex); #if defined(CONFIG_JUMP_LABEL) - jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); + static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); #endif return 0; } @@ -89,7 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg) list_del_rcu(&reg->list); mutex_unlock(&nf_hook_mutex); #if defined(CONFIG_JUMP_LABEL) - jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); + static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); #endif synchronize_net(); } -- cgit v0.10.2 From 8eedce996556d7d06522cd3a0e6069141c8dffe0 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Tue, 28 Feb 2012 13:49:01 -0500 Subject: static
keys: Inline the static_key_enabled() function In the jump label enabled case, calling static_key_enabled() results in a function call. The function returns the results of a compare, so it really doesn't need the overhead of a full function call. Let's make it 'static inline' for both the jump label enabled and disabled cases. Signed-off-by: Jason Baron Cc: a.p.zijlstra@chello.nl Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@polymtl.ca Cc: Andrew Morton Cc: Linus Torvalds Link: http://lkml.kernel.org/r/201202281849.q1SIn1p2023270@int-mx10.intmail.prod.int.phx2.redhat.com Signed-off-by: Ingo Molnar diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 2172da2..c513a40 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -127,7 +127,6 @@ extern int jump_label_text_reserved(void *start, void *end); extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); extern void static_key_slow_dec_deferred(struct static_key_deferred *key); -extern bool static_key_enabled(struct static_key *key); extern void jump_label_apply_nops(struct module *mod); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); @@ -198,11 +197,6 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -static inline bool static_key_enabled(struct static_key *key) -{ - return (atomic_read(&key->enabled) > 0); -} - static inline int jump_label_apply_nops(struct module *mod) { return 0; @@ -224,4 +218,9 @@ jump_label_rate_limit(struct static_key_deferred *key, #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled +static inline bool static_key_enabled(struct static_key *key) +{ + return (atomic_read(&key->enabled) > 0); +} + #endif /* _LINUX_JUMP_LABEL_H */ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index bf9dcad..4304919 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -29,12 +29,6 @@ void jump_label_unlock(void) mutex_unlock(&jump_label_mutex); } -bool static_key_enabled(struct static_key *key) -{ - return (atomic_read(&key->enabled) > 0); -} -EXPORT_SYMBOL_GPL(static_key_enabled); - static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; -- cgit v0.10.2 From 41c21a68308f2f268a01ec1a7fa5f650e320a0f3 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 12:13:36 +0900 Subject: perf evlist: Restore original errno after open failed If perf_evsel__open() failed, the errno was set and returned properly. However, since perf_evlist__open() called close() on the fds for all of evsel x cpu x thread after the failure, the errno was overridden by other code (EBADF). So the caller of the function ended up seeing a different error message and getting confused. Fix it by restoring the original return value. Because one of the callers of the function is in the python extension, and it uses the system errno internally, it'd be better to restore the original value rather than using the return value of the function directly, IMHO (i.e.
I'm not a python expert :) Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329966816-23175-1-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index f8da9fa..159263d 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -765,6 +765,7 @@ out_err: list_for_each_entry_reverse(evsel, &evlist->entries, node) perf_evsel__close(evsel, ncpus, nthreads); + errno = -err; return err; } -- cgit v0.10.2 From 67cbbd7f1838cc8f6900f39d860d6c2e1f86cf61 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 16:08:14 +0900 Subject: perf tools: Add descriptions of missing Makefile arguments There are some variable arguments that can be specified on make invocation, but some of them are missing descriptions, so the user cannot discover them easily. Fix it. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329980894-4289-1-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index e011b50..fa04340 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -15,6 +15,16 @@ endif # Define V to have a more verbose compile. # +# Define O to save output files in a separate directory. +# +# Define ARCH as name of target architecture if you want cross-builds. +# +# Define CROSS_COMPILE as prefix name of compiler if you want cross-builds. +# +# Define NO_LIBPERL to disable perl script extension. +# +# Define NO_LIBPYTHON to disable python script extension. +# # Define PYTHON to point to the python binary if the default # `python' is not correct; for example: PYTHON=python2 # @@ -32,6 +42,10 @@ endif # Define NO_DWARF if you do not want debug-info analysis feature at all. # # Define WERROR=0 to disable treating any warnings as errors. +# +# Define NO_NEWT if you do not want TUI support. +# +# Define NO_DEMANGLE if you do not want C++ symbol demangling. $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) -- cgit v0.10.2 From 58e817d997d162682717dcaae39393dcadf5f296 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 17:46:20 +0900 Subject: perf annotate: Print asm code as blue when source code is displayed Print unselected asm code lines as blue. This is what we do now for --stdio.
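In short, the intent of the hunk below is: when source code is shown, paint every asm line (ol->offset != -1) that is not the current entry in the HE_COLORSET_CODE colour, so the cursor line stays distinguishable. A condensed, illustrative sketch of that condition (names taken from the patch; width bookkeeping omitted, and the two nested ifs of the real hunk folded into one):

	if (!ab->hide_src_code && ol->offset != -1 &&
	    (!current_entry || (self->use_navkeypressed && !self->navkeypressed)))
		ui_browser__set_color(self, HE_COLORSET_CODE);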
Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329986784-4916-2-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index 295a9c9..76caae9 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c @@ -69,14 +69,17 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro if (!self->navkeypressed) width += 1; + if (!ab->hide_src_code && ol->offset != -1) + if (!current_entry || (self->use_navkeypressed && + !self->navkeypressed)) + ui_browser__set_color(self, HE_COLORSET_CODE); + if (!*ol->line) slsmg_write_nstring(" ", width - 18); else slsmg_write_nstring(ol->line, width - 18); - if (!current_entry) - ui_browser__set_color(self, HE_COLORSET_CODE); - else + if (current_entry) ab->selection = ol; } -- cgit v0.10.2 From ef7c537221160c285704997a913fb66ddc1fe6bc Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 17:46:21 +0900 Subject: perf annotate: Handle lower case key code in annotate_browser__run() Accepting an upper case character only is inconvenient since it requires the SHIFT key too. Why not change it to accept a simple key stroke? Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329986784-4916-3-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index 76caae9..374d3bc5 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c @@ -287,9 +287,11 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, nd = self->curr_hot; break; case 'H': + case 'h': nd = self->curr_hot; break; case 'S': + case 's': if (annotate_browser__toggle_source(self)) ui_helpline__puts(help); continue; -- cgit v0.10.2 From 142cfbd0a0353bbcf4ce62f1d4b4ffaa5fe228f9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 17:46:22 +0900 Subject: perf annotate: Restore title when came back to original symbol On tui annotation, the title was set to the name of the target symbol if the user selected the target. However, it remained after returning to the original symbol from the target. Fix it. Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329986784-4916-4-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index 374d3bc5..e733577 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c @@ -343,6 +343,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, pthread_mutex_unlock(&notes->lock); symbol__tui_annotate(target, ms->map, evidx, timer, arg, delay_secs); + ui_browser__show_title(&self->b, sym->name); } continue; case K_LEFT: -- cgit v0.10.2 From 824ac0e9838aadf5a398850c80ec2a43b9d0ca49 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 17:46:23 +0900 Subject: perf annotate: Fix help string on tui Separate multiple bindings using '/', capitalize descriptions, and add a missing key binding.
Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329986784-4916-5-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index e733577..57a4c6e 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c @@ -233,9 +233,9 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, struct rb_node *nd = NULL; struct map_symbol *ms = self->b.priv; struct symbol *sym = ms->sym; - const char *help = "<-, ESC: exit, TAB/shift+TAB: cycle hottest lines, " - "H: Hottest, -> Line action, S -> Toggle source " - "code view"; + const char *help = "<-/ESC: Exit, TAB/shift+TAB: Cycle hot lines, " + "H: Go to hottest line, ->/ENTER: Line action, " + "S: Toggle source code view"; int key; if (ui_browser__show(&self->b, sym->name, help) < 0) -- cgit v0.10.2 From ff2a6617c2acd6a8dc468c90ab8708a6ad112bb0 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 23 Feb 2012 17:46:24 +0900 Subject: perf annotate: Add missing newline on error message If perf.data couldn't find vmlinux image for the given build-id, it would print error message. However it lacked a newline at the end, so the output looked like below: $ perf annotate --stdio No vmlinux file with build id 63b554b2e90f14a4bced200008865e757d3e8b36 was found in the path. Please use: perf buildid-cache -av vmlinux or: --vmlinux vmlinux Percent | Source code & Disassembly of a.out ------------------------------------------------ : : : : Disassembly of section .text: : : 00000000004004f4 : 0.00 : 4004f4: push %rbp 0.00 : 4004f5: mov %rsp,%rbp 0.00 : 4004f8: movl $0x0,-0x4(%rbp) 0.00 : 4004ff: jmp 400517 14.70 : 400501: mov 0x200b28(%rip),%rax # 601030 0.02 : 400508: add $0x1,%rax 0.01 : 40050c: mov %rax,0x200b1d(%rip) # 601030 0.01 : 400513: addl $0x1,-0x4(%rbp) 13.92 : 400517: cmpl $0x98967f,-0x4(%rbp) 71.33 : 40051e: jle 400501 0.00 : 400520: leaveq 0.00 : 400521: retq Fix it by adding a newline at the end of the message. It doesn't affect the tui output AFAICS. New output will look like this: ... or: --vmlinux vmlinux Percent | Source code & Disassembly of a.out ------------------------------------------------ : : : : Disassembly of section .text: ... Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1329986784-4916-6-git-send-email-namhyung.kim@lge.com Signed-off-by: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 011ed26..e5a462f 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -315,7 +315,7 @@ fallback: "Please use:\n\n" " perf buildid-cache -av vmlinux\n\n" "or:\n\n" - " --vmlinux vmlinux", + " --vmlinux vmlinux\n", sym->name, build_id_msg ?: ""); goto out_free_filename; } -- cgit v0.10.2 From bce38cd53e5ddba9cb6d708c4ef3d04a4016ec7e Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:51 +0100 Subject: perf: Add generic taken branch sampling support This patch adds the ability to sample taken branches to the perf_event interface. The ability to capture taken branches is very useful for all sorts of analysis. For instance, basic block profiling, call counts, statistical call graph. This new capability requires hardware assist and as such may not be available on all HW platforms. 
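For a concrete feel of the interface, here is a minimal, illustrative userspace sketch (not part of the patch): it requests taken-branch samples using the PERF_SAMPLE_BRANCH_STACK and branch_sample_type bits that the remainder of this message describes. The raw syscall wrapper is assumed, since glibc provides none; the period and filter choices are arbitrary.

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <string.h>
	#include <unistd.h>

	static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				       int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int open_branch_sampling_event(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 100000;	/* arbitrary period */
		/* each sample carries the IP plus a variable-sized branch stack */
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		/* any taken branch, user level only; if the priv bits were
		 * omitted, the kernel would inherit them from the event */
		attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
					  PERF_SAMPLE_BRANCH_USER;

		/* profile the calling task on any CPU */
		return sys_perf_event_open(&attr, 0, -1, -1, 0);
	}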
On Intel x86 it is implemented on top of the Last Branch Record (LBR) facility. To enable taken branches sampling, the PERF_SAMPLE_BRANCH_STACK bit must be set in attr->sample_type. Sampled taken branches may be filtered by type and/or priv levels. The patch adds a new field, called branch_sample_type, to the perf_event_attr structure. It contains a bitmask of filters to apply to the sampled taken branches. Filters may be implemented in HW. If the HW filter does not exist or is not good enough, some arch may also implement a SW filter. The following generic filters are currently defined: - PERF_SAMPLE_USER only branches whose targets are at the user level - PERF_SAMPLE_KERNEL only branches whose targets are at the kernel level - PERF_SAMPLE_HV only branches whose targets are at the hypervisor level - PERF_SAMPLE_ANY any type of branches (subject to priv levels filters) - PERF_SAMPLE_ANY_CALL any call branches (may incl. syscall on some arch) - PERF_SAMPLE_ANY_RET any return branches (may incl. syscall returns on some arch) - PERF_SAMPLE_IND_CALL indirect call branches Obviously filter may be combined. The priv level bits are optional. If not provided, the priv level of the associated event are used. It is possible to collect branches at a priv level different from the associated event. Use of kernel, hv priv levels is subject to permissions and availability (hv). The number of taken branch records present in each sample may vary based on HW, the type of sampled branches, the executed code. Therefore each sample contains the number of taken branches it contains. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-2-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 47a7e63..309d0cc 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -142,9 +142,11 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); - cpuc->lbr_entries[i].from = msr_lastbranch.from; - cpuc->lbr_entries[i].to = msr_lastbranch.to; - cpuc->lbr_entries[i].flags = 0; + cpuc->lbr_entries[i].from = msr_lastbranch.from; + cpuc->lbr_entries[i].to = msr_lastbranch.to; + cpuc->lbr_entries[i].mispred = 0; + cpuc->lbr_entries[i].predicted = 0; + cpuc->lbr_entries[i].reserved = 0; } cpuc->lbr_stack.nr = i; } @@ -165,19 +167,22 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) for (i = 0; i < x86_pmu.lbr_nr; i++) { unsigned long lbr_idx = (tos - i) & mask; - u64 from, to, flags = 0; + u64 from, to, mis = 0, pred = 0; rdmsrl(x86_pmu.lbr_from + lbr_idx, from); rdmsrl(x86_pmu.lbr_to + lbr_idx, to); if (lbr_format == LBR_FORMAT_EIP_FLAGS) { - flags = !!(from & LBR_FROM_FLAG_MISPRED); + mis = !!(from & LBR_FROM_FLAG_MISPRED); + pred = !mis; from = (u64)((((s64)from) << 1) >> 1); } - cpuc->lbr_entries[i].from = from; - cpuc->lbr_entries[i].to = to; - cpuc->lbr_entries[i].flags = flags; + cpuc->lbr_entries[i].from = from; + cpuc->lbr_entries[i].to = to; + cpuc->lbr_entries[i].mispred = mis; + cpuc->lbr_entries[i].predicted = pred; + cpuc->lbr_entries[i].reserved = 0; } cpuc->lbr_stack.nr = i; } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 64426b7..5fc494f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -129,11 +129,40 @@ enum perf_event_sample_format { PERF_SAMPLE_PERIOD = 1U << 
8, PERF_SAMPLE_STREAM_ID = 1U << 9, PERF_SAMPLE_RAW = 1U << 10, + PERF_SAMPLE_BRANCH_STACK = 1U << 11, - PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */ }; /* + * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set + * + * If the user does not pass priv level information via branch_sample_type, + * the kernel uses the event's priv level. Branch and event priv levels do + * not have to match. Branch priv level is checked for permissions. + * + * The branch types can be combined, however BRANCH_ANY covers all types + * of branches and therefore it supersedes all the other types. + */ +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */ + PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */ + PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */ + + PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */ + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */ + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */ + PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */ + + PERF_SAMPLE_BRANCH_MAX = 1U << 7, /* non-ABI */ +}; + +#define PERF_SAMPLE_BRANCH_PLM_ALL \ + (PERF_SAMPLE_BRANCH_USER|\ + PERF_SAMPLE_BRANCH_KERNEL|\ + PERF_SAMPLE_BRANCH_HV) + +/* * The format of the data returned by read() on a perf event fd, * as specified by attr.read_format: * @@ -240,6 +269,7 @@ struct perf_event_attr { __u64 bp_len; __u64 config2; /* extension of config1 */ }; + __u64 branch_sample_type; /* enum branch_sample_type */ }; /* @@ -458,6 +488,8 @@ enum perf_event_type { * * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW + * + * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK * }; */ PERF_RECORD_SAMPLE = 9, @@ -530,12 +562,34 @@ struct perf_raw_record { void *data; }; +/* + * single taken branch record layout: + * + * from: source instruction (may not always be a branch insn) + * to: branch target + * mispred: branch target was mispredicted + * predicted: branch target was predicted + * + * support for mispred, predicted is optional. In case it + * is not supported mispred = predicted = 0. + */ struct perf_branch_entry { - __u64 from; - __u64 to; - __u64 flags; + __u64 from; + __u64 to; + __u64 mispred:1, /* target mispredicted */ + predicted:1,/* target predicted */ + reserved:62; }; +/* + * branch stack layout: + * nr: number of taken branches stored in entries[] + * + * Note that nr can vary from sample to sample + * branches (to, from) are stored from most recent + * to least recent, i.e., entries[0] contains the most + * recent branch. 
+ */ struct perf_branch_stack { __u64 nr; struct perf_branch_entry entries[0]; @@ -566,7 +620,9 @@ struct hw_perf_event { unsigned long event_base; int idx; int last_cpu; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; }; struct { /* software */ struct hrtimer hrtimer; @@ -1007,12 +1063,14 @@ struct perf_sample_data { u64 period; struct perf_callchain_entry *callchain; struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; }; static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) { data->addr = addr; data->raw = NULL; + data->br_stack = NULL; } extern void perf_output_sample(struct perf_output_handle *handle, @@ -1151,6 +1209,11 @@ extern void perf_bp_event(struct perf_event *event, void *data); # define perf_instruction_pointer(regs) instruction_pointer(regs) #endif +static inline bool has_branch_stack(struct perf_event *event) +{ + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; +} + extern int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size); extern void perf_output_end(struct perf_output_handle *handle); diff --git a/kernel/events/core.c b/kernel/events/core.c index e8b32ac..5820efd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -118,6 +118,13 @@ static int cpu_function_call(int cpu, int (*func) (void *info), void *info) PERF_FLAG_FD_OUTPUT |\ PERF_FLAG_PID_CGROUP) +/* + * branch priv levels that need permission checks + */ +#define PERF_SAMPLE_BRANCH_PERM_PLM \ + (PERF_SAMPLE_BRANCH_KERNEL |\ + PERF_SAMPLE_BRANCH_HV) + enum event_type_t { EVENT_FLEXIBLE = 0x1, EVENT_PINNED = 0x2, @@ -3907,6 +3914,24 @@ void perf_output_sample(struct perf_output_handle *handle, } } } + + if (sample_type & PERF_SAMPLE_BRANCH_STACK) { + if (data->br_stack) { + size_t size; + + size = data->br_stack->nr + * sizeof(struct perf_branch_entry); + + perf_output_put(handle, data->br_stack->nr); + perf_output_copy(handle, data->br_stack->entries, size); + } else { + /* + * we always store at least the value of nr + */ + u64 nr = 0; + perf_output_put(handle, nr); + } + } } void perf_prepare_sample(struct perf_event_header *header, @@ -3949,6 +3974,15 @@ void perf_prepare_sample(struct perf_event_header *header, WARN_ON_ONCE(size & (sizeof(u64)-1)); header->size += size; } + + if (sample_type & PERF_SAMPLE_BRANCH_STACK) { + int size = sizeof(u64); /* nr */ + if (data->br_stack) { + size += data->br_stack->nr + * sizeof(struct perf_branch_entry); + } + header->size += size; + } } static void perf_event_output(struct perf_event *event, @@ -5935,6 +5969,40 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, if (attr->read_format & ~(PERF_FORMAT_MAX-1)) return -EINVAL; + if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { + u64 mask = attr->branch_sample_type; + + /* only using defined bits */ + if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) + return -EINVAL; + + /* at least one branch bit must be set */ + if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) + return -EINVAL; + + /* kernel level capture: check permissions */ + if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) + && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* propagate priv level, when not set for branch */ + if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { + + /* exclude_kernel checked on syscall entry */ + if (!attr->exclude_kernel) + mask |= PERF_SAMPLE_BRANCH_KERNEL; + + if (!attr->exclude_user) + mask |= PERF_SAMPLE_BRANCH_USER; + + if (!attr->exclude_hv) + mask |= 
PERF_SAMPLE_BRANCH_HV; + /* + * adjust user setting (for HW filter setup) + */ + attr->branch_sample_type = mask; + } + } out: return ret; -- cgit v0.10.2 From 225ce53910edc3c2322b1e4f2ed049a9196cd0b3 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:52 +0100 Subject: perf/x86: Add Intel LBR MSR definitions This patch adds the LBR definitions for NHM/WSM/SNB and Core. It also adds the definitions for the architected LBR MSR: LBR_SELECT, LBRT_TOS. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-3-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index a6962d9..ccb8059 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -56,6 +56,13 @@ #define MSR_OFFCORE_RSP_0 0x000001a6 #define MSR_OFFCORE_RSP_1 0x000001a7 +#define MSR_LBR_SELECT 0x000001c8 +#define MSR_LBR_TOS 0x000001c9 +#define MSR_LBR_NHM_FROM 0x00000680 +#define MSR_LBR_NHM_TO 0x000006c0 +#define MSR_LBR_CORE_FROM 0x00000040 +#define MSR_LBR_CORE_TO 0x00000060 + #define MSR_IA32_PEBS_ENABLE 0x000003f1 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 309d0cc..6710a51 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -203,23 +203,23 @@ void intel_pmu_lbr_read(void) void intel_pmu_lbr_init_core(void) { x86_pmu.lbr_nr = 4; - x86_pmu.lbr_tos = 0x01c9; - x86_pmu.lbr_from = 0x40; - x86_pmu.lbr_to = 0x60; + x86_pmu.lbr_tos = MSR_LBR_TOS; + x86_pmu.lbr_from = MSR_LBR_CORE_FROM; + x86_pmu.lbr_to = MSR_LBR_CORE_TO; } void intel_pmu_lbr_init_nhm(void) { x86_pmu.lbr_nr = 16; - x86_pmu.lbr_tos = 0x01c9; - x86_pmu.lbr_from = 0x680; - x86_pmu.lbr_to = 0x6c0; + x86_pmu.lbr_tos = MSR_LBR_TOS; + x86_pmu.lbr_from = MSR_LBR_NHM_FROM; + x86_pmu.lbr_to = MSR_LBR_NHM_TO; } void intel_pmu_lbr_init_atom(void) { x86_pmu.lbr_nr = 8; - x86_pmu.lbr_tos = 0x01c9; - x86_pmu.lbr_from = 0x40; - x86_pmu.lbr_to = 0x60; + x86_pmu.lbr_tos = MSR_LBR_TOS; + x86_pmu.lbr_from = MSR_LBR_CORE_FROM; + x86_pmu.lbr_to = MSR_LBR_CORE_TO; } -- cgit v0.10.2 From b36817e8863090f1f24e538106ca50fa1d9e4003 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:53 +0100 Subject: perf/x86: Add Intel LBR sharing logic The Intel LBR on some recent processor is capable of filtering branches by type. The filter is configurable via the LBR_SELECT MSR register. There are limitation on how this register can be used. On Nehalem/Westmere, the LBR_SELECT is shared by the two HT threads when HT is on. It is private to each core when HT is off. On SandyBridge, the LBR_SELECT register is private to each thread when HT is on. It is private to each core when HT is off. The kernel must manage the sharing of LBR_SELECT. It allows multiple users on the same logical CPU to use LBR_SELECT as long as they program it with the same value. Across sibling CPUs (HT threads), the same restriction applies on NHM/WSM. This patch implements this sharing logic by leveraging the mechanism put in place for managing the offcore_response shared MSR. We modify __intel_shared_reg_get_constraints() to cause x86_get_event_constraint() to be called because LBR may be associated with events that may be counter constrained. 
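A distilled sketch of the arbitration rule this patch applies to LBR_SELECT; the real code lives in __intel_shared_reg_get_constraints() and additionally takes the er_account lock, this merely shows the invariant:

	/* Grant the shared register iff it is unused or already programmed
	 * with the requested value; otherwise the event must be constrained
	 * out until the current users release it. */
	static bool shared_reg_compatible(struct er_account *era, u64 config)
	{
		if (!atomic_read(&era->ref) || era->config == config) {
			era->config = config;
			atomic_inc(&era->ref);
			return true;
		}
		return false;
	}

Reusing the offcore_response bookkeeping keeps a single arbitration path for all shared PMU registers, whether the sharing scope is the core (NHM/WSM with HT) or the logical CPU (SNB).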
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-4-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index f8bddb5..3779313 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -426,6 +426,10 @@ static int __x86_pmu_event_init(struct perf_event *event) /* mark unused */ event->hw.extra_reg.idx = EXTRA_REG_NONE; + /* mark not used */ + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + return x86_pmu.hw_config(event); } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 82db83b..9b9c580 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -33,6 +33,7 @@ enum extra_reg_type { EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ + EXTRA_REG_LBR = 2, /* lbr_select */ EXTRA_REG_MAX /* number of entries needed */ }; @@ -130,6 +131,7 @@ struct cpu_hw_events { void *lbr_context; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; + struct er_account *lbr_sel; /* * Intel host/guest exclude bits @@ -342,6 +344,8 @@ struct x86_pmu { */ unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ int lbr_nr; /* hardware stack size */ + u64 lbr_sel_mask; /* LBR_SELECT valid bits */ + const int *lbr_sel_map; /* lbr_select mappings */ /* * Extra registers for events diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3bd37bd..97f7bb5 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1123,17 +1123,17 @@ static bool intel_try_alt_er(struct perf_event *event, int orig_idx) */ static struct event_constraint * __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, - struct perf_event *event) + struct perf_event *event, + struct hw_perf_event_extra *reg) { struct event_constraint *c = &emptyconstraint; - struct hw_perf_event_extra *reg = &event->hw.extra_reg; struct er_account *era; unsigned long flags; int orig_idx = reg->idx; /* already allocated shared msr */ if (reg->alloc) - return &unconstrained; + return NULL; /* call x86_get_event_constraint() */ again: era = &cpuc->shared_regs->regs[reg->idx]; @@ -1156,14 +1156,10 @@ again: reg->alloc = 1; /* - * All events using extra_reg are unconstrained. - * Avoids calling x86_get_event_constraints() - * - * Must revisit if extra_reg controlling events - * ever have constraints. Worst case we go through - * the regular event constraint table. 
+ * need to call x86_get_event_constraint() + * to check if associated event has constraints */ - c = &unconstrained; + c = NULL; } else if (intel_try_alt_er(event, orig_idx)) { raw_spin_unlock_irqrestore(&era->lock, flags); goto again; @@ -1200,11 +1196,23 @@ static struct event_constraint * intel_shared_regs_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - struct event_constraint *c = NULL; - - if (event->hw.extra_reg.idx != EXTRA_REG_NONE) - c = __intel_shared_reg_get_constraints(cpuc, event); - + struct event_constraint *c = NULL, *d; + struct hw_perf_event_extra *xreg, *breg; + + xreg = &event->hw.extra_reg; + if (xreg->idx != EXTRA_REG_NONE) { + c = __intel_shared_reg_get_constraints(cpuc, event, xreg); + if (c == &emptyconstraint) + return c; + } + breg = &event->hw.branch_reg; + if (breg->idx != EXTRA_REG_NONE) { + d = __intel_shared_reg_get_constraints(cpuc, event, breg); + if (d == &emptyconstraint) { + __intel_shared_reg_put_constraints(cpuc, xreg); + c = d; + } + } return c; } @@ -1252,6 +1260,10 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, reg = &event->hw.extra_reg; if (reg->idx != EXTRA_REG_NONE) __intel_shared_reg_put_constraints(cpuc, reg); + + reg = &event->hw.branch_reg; + if (reg->idx != EXTRA_REG_NONE) + __intel_shared_reg_put_constraints(cpuc, reg); } static void intel_put_event_constraints(struct cpu_hw_events *cpuc, @@ -1431,7 +1443,7 @@ static int intel_pmu_cpu_prepare(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - if (!x86_pmu.extra_regs) + if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map)) return NOTIFY_OK; cpuc->shared_regs = allocate_shared_regs(cpu); @@ -1453,22 +1465,28 @@ static void intel_pmu_cpu_starting(int cpu) */ intel_pmu_lbr_reset(); - if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING)) + cpuc->lbr_sel = NULL; + + if (!cpuc->shared_regs) return; - for_each_cpu(i, topology_thread_cpumask(cpu)) { - struct intel_shared_regs *pc; + if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) { + for_each_cpu(i, topology_thread_cpumask(cpu)) { + struct intel_shared_regs *pc; - pc = per_cpu(cpu_hw_events, i).shared_regs; - if (pc && pc->core_id == core_id) { - cpuc->kfree_on_online = cpuc->shared_regs; - cpuc->shared_regs = pc; - break; + pc = per_cpu(cpu_hw_events, i).shared_regs; + if (pc && pc->core_id == core_id) { + cpuc->kfree_on_online = cpuc->shared_regs; + cpuc->shared_regs = pc; + break; + } } + cpuc->shared_regs->core_id = core_id; + cpuc->shared_regs->refcnt++; } - cpuc->shared_regs->core_id = core_id; - cpuc->shared_regs->refcnt++; + if (x86_pmu.lbr_sel_map) + cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; } static void intel_pmu_cpu_dying(int cpu) -- cgit v0.10.2 From ff3fb511ba377e8a0a7f553cc352237f70d08121 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:54 +0100 Subject: perf/x86: Sync branch stack sampling with precise_sampling If precise sampling is enabled on Intel x86 then perf_event uses PEBS. To correct for the off-by-one error of PEBS, perf_event uses LBR when precise_sample > 1. On Intel x86 PERF_SAMPLE_BRANCH_STACK is implemented using LBR, therefore both features must be coordinated as they may not configure LBR the same way. For PEBS, LBR needs to capture all branches at the priv level of the associated event. This patch checks that the branch type and priv level of BRANCH_STACK is compatible with that of the PEBS LBR requirement, thereby allowing: $ perf record -b any,u -e instructions:upp .... 
But: $ perf record -b any_call,u -e instructions:upp is not possible. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-5-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3779313..cea5674 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -353,6 +353,36 @@ int x86_setup_perfctr(struct perf_event *event) return 0; } +/* + * check that branch_sample_type is compatible with + * settings needed for precise_ip > 1 which implies + * using the LBR to capture ALL taken branches at the + * priv levels of the measurement + */ +static inline int precise_br_compat(struct perf_event *event) +{ + u64 m = event->attr.branch_sample_type; + u64 b = 0; + + /* must capture all branches */ + if (!(m & PERF_SAMPLE_BRANCH_ANY)) + return 0; + + m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER; + + if (!event->attr.exclude_user) + b |= PERF_SAMPLE_BRANCH_USER; + + if (!event->attr.exclude_kernel) + b |= PERF_SAMPLE_BRANCH_KERNEL; + + /* + * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86 + */ + + return m == b; +} + int x86_pmu_hw_config(struct perf_event *event) { if (event->attr.precise_ip) { @@ -369,6 +399,36 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.precise_ip > precise) return -EOPNOTSUPP; + /* + * check that PEBS LBR correction does not conflict with + * whatever the user is asking with attr->branch_sample_type + */ + if (event->attr.precise_ip > 1) { + u64 *br_type = &event->attr.branch_sample_type; + + if (has_branch_stack(event)) { + if (!precise_br_compat(event)) + return -EOPNOTSUPP; + + /* branch_sample_type is compatible */ + + } else { + /* + * user did not specify branch_sample_type + * + * For PEBS fixups, we capture all + * the branches at the priv level of the + * event. + */ + *br_type = PERF_SAMPLE_BRANCH_ANY; + + if (!event->attr.exclude_user) + *br_type |= PERF_SAMPLE_BRANCH_USER; + + if (!event->attr.exclude_kernel) + *br_type |= PERF_SAMPLE_BRANCH_KERNEL; + } + } } /* -- cgit v0.10.2 From c5cc2cd906ea9fe73e3c93f9ad824996faa278cc Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:55 +0100 Subject: perf/x86: Add Intel LBR mappings for PERF_SAMPLE_BRANCH filters This patch adds the mappings from the generic PERF_SAMPLE_BRANCH_* filters to the actual Intel x86 LBR filters, whenever they exist.
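A sketch of how these tables are meant to be consumed; the real setup helper, intel_pmu_setup_lbr_filter(), is only declared later in this series, so the loop below is illustrative and simplified:

	/* Translate generic branch_sample_type bits into an LBR_SELECT value
	 * via the per-model map. LBR_SELECT bits mean "do NOT capture", so
	 * the accumulated capture mask is inverted against lbr_sel_mask. */
	static int lbr_sel_from_branch_type(u64 br_type, u64 *lbr_sel)
	{
		u64 m, mask = 0;
		int v;

		for (m = PERF_SAMPLE_BRANCH_USER; m < PERF_SAMPLE_BRANCH_MAX; m <<= 1) {
			if (!(br_type & m))
				continue;
			v = x86_pmu.lbr_sel_map[m];
			if (v == LBR_NOT_SUPP)	/* no HW equivalent on this model */
				return -EOPNOTSUPP;
			if (v != LBR_IGN)
				mask |= v;
		}
		*lbr_sel = mask ^ x86_pmu.lbr_sel_mask;
		return 0;
	}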
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-6-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 9b9c580..4e94897 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -539,6 +539,8 @@ void intel_pmu_lbr_init_nhm(void); void intel_pmu_lbr_init_atom(void); +void intel_pmu_lbr_init_snb(void); + int p4_pmu_init(void); int p6_pmu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 97f7bb5..b0db016 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1757,7 +1757,7 @@ __init int intel_pmu_init(void) memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - intel_pmu_lbr_init_nhm(); + intel_pmu_lbr_init_snb(); x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 6710a51..e54a063 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -14,6 +14,49 @@ enum { }; /* + * Intel LBR_SELECT bits + * Intel Vol3a, April 2011, Section 16.7 Table 16-10 + * + * Hardware branch filter (not available on all CPUs) + */ +#define LBR_KERNEL_BIT 0 /* do not capture at ring0 */ +#define LBR_USER_BIT 1 /* do not capture at ring > 0 */ +#define LBR_JCC_BIT 2 /* do not capture conditional branches */ +#define LBR_REL_CALL_BIT 3 /* do not capture relative calls */ +#define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */ +#define LBR_RETURN_BIT 5 /* do not capture near returns */ +#define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */ +#define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */ +#define LBR_FAR_BIT 8 /* do not capture far branches */ + +#define LBR_KERNEL (1 << LBR_KERNEL_BIT) +#define LBR_USER (1 << LBR_USER_BIT) +#define LBR_JCC (1 << LBR_JCC_BIT) +#define LBR_REL_CALL (1 << LBR_REL_CALL_BIT) +#define LBR_IND_CALL (1 << LBR_IND_CALL_BIT) +#define LBR_RETURN (1 << LBR_RETURN_BIT) +#define LBR_REL_JMP (1 << LBR_REL_JMP_BIT) +#define LBR_IND_JMP (1 << LBR_IND_JMP_BIT) +#define LBR_FAR (1 << LBR_FAR_BIT) + +#define LBR_PLM (LBR_KERNEL | LBR_USER) + +#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */ +#define LBR_NOT_SUPP -1 /* LBR filter not supported */ +#define LBR_IGN 0 /* ignored */ + +#define LBR_ANY \ + (LBR_JCC |\ + LBR_REL_CALL |\ + LBR_IND_CALL |\ + LBR_RETURN |\ + LBR_REL_JMP |\ + LBR_IND_JMP |\ + LBR_FAR) + +#define LBR_FROM_FLAG_MISPRED (1ULL << 63) + +/* * We only support LBR implementations that have FREEZE_LBRS_ON_PMI * otherwise it becomes near impossible to get a reliable stack. 
*/ @@ -151,8 +194,6 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) cpuc->lbr_stack.nr = i; } -#define LBR_FROM_FLAG_MISPRED (1ULL << 63) - /* * Due to lack of segmentation in Linux the effective address (offset) * is the same as the linear address, allowing us to merge the LIP and EIP @@ -200,26 +241,84 @@ void intel_pmu_lbr_read(void) intel_pmu_lbr_read_64(cpuc); } +/* + * Map interface branch filters onto LBR filters + */ +static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { + [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, + [PERF_SAMPLE_BRANCH_USER] = LBR_USER, + [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, + [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, + [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP + | LBR_IND_JMP | LBR_FAR, + /* + * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches + */ + [PERF_SAMPLE_BRANCH_ANY_CALL] = + LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR, + /* + * NHM/WSM erratum: must include IND_JMP to capture IND_CALL + */ + [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP, +}; + +static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { + [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, + [PERF_SAMPLE_BRANCH_USER] = LBR_USER, + [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, + [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, + [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR, + [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL + | LBR_FAR, + [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL, +}; + +/* core */ void intel_pmu_lbr_init_core(void) { x86_pmu.lbr_nr = 4; x86_pmu.lbr_tos = MSR_LBR_TOS; x86_pmu.lbr_from = MSR_LBR_CORE_FROM; x86_pmu.lbr_to = MSR_LBR_CORE_TO; + + pr_cont("4-deep LBR, "); } +/* nehalem/westmere */ void intel_pmu_lbr_init_nhm(void) { x86_pmu.lbr_nr = 16; x86_pmu.lbr_tos = MSR_LBR_TOS; x86_pmu.lbr_from = MSR_LBR_NHM_FROM; x86_pmu.lbr_to = MSR_LBR_NHM_TO; + + x86_pmu.lbr_sel_mask = LBR_SEL_MASK; + x86_pmu.lbr_sel_map = nhm_lbr_sel_map; + + pr_cont("16-deep LBR, "); } +/* sandy bridge */ +void intel_pmu_lbr_init_snb(void) +{ + x86_pmu.lbr_nr = 16; + x86_pmu.lbr_tos = MSR_LBR_TOS; + x86_pmu.lbr_from = MSR_LBR_NHM_FROM; + x86_pmu.lbr_to = MSR_LBR_NHM_TO; + + x86_pmu.lbr_sel_mask = LBR_SEL_MASK; + x86_pmu.lbr_sel_map = snb_lbr_sel_map; + + pr_cont("16-deep LBR, "); +} + +/* atom */ void intel_pmu_lbr_init_atom(void) { x86_pmu.lbr_nr = 8; x86_pmu.lbr_tos = MSR_LBR_TOS; x86_pmu.lbr_from = MSR_LBR_CORE_FROM; x86_pmu.lbr_to = MSR_LBR_CORE_TO; + + pr_cont("8-deep LBR, "); } -- cgit v0.10.2 From 88c9a65e13f393fd60d8b9e9c659a34f9e39967d Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:56 +0100 Subject: perf/x86: Disable LBR support for older Intel Atom processors The patch adds a restriction for Intel Atom LBR support. Only steppings 10 (PineView) and more recent are supported. Older models do not have a functional LBR. Their LBR does not freeze on PMU interrupt which makes LBR unusable in the context of perf_events. 
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-7-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index e54a063..07f0ff8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -315,6 +315,16 @@ void intel_pmu_lbr_init_snb(void) /* atom */ void intel_pmu_lbr_init_atom(void) { + /* + * only models starting at stepping 10 seems + * to have an operational LBR which can freeze + * on PMU interrupt + */ + if (boot_cpu_data.x86_mask < 10) { + pr_cont("LBR disabled due to erratum"); + return; + } + x86_pmu.lbr_nr = 8; x86_pmu.lbr_tos = MSR_LBR_TOS; x86_pmu.lbr_from = MSR_LBR_CORE_FROM; -- cgit v0.10.2 From 60ce0fbd072695866cb27b729690ab59dce705a5 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:57 +0100 Subject: perf/x86: Implement PERF_SAMPLE_BRANCH for Intel CPUs This patch implements PERF_SAMPLE_BRANCH support for Intel x86 processors. It connects PERF_SAMPLE_BRANCH to the actual LBR. The patch adds the hooks in the PMU irq handler to save the LBR on counter overflow for both regular and PEBS modes. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-8-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 4e94897..ef7419c 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -541,6 +541,8 @@ void intel_pmu_lbr_init_atom(void); void intel_pmu_lbr_init_snb(void); +int intel_pmu_setup_lbr_filter(struct perf_event *event); + int p4_pmu_init(void); int p6_pmu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index b0db016..7cc1e2d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -727,6 +727,19 @@ static __initconst const u64 atom_hw_cache_event_ids }, }; +static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) +{ + /* user explicitly requested branch sampling */ + if (has_branch_stack(event)) + return true; + + /* implicit branch sampling to correct PEBS skid */ + if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) + return true; + + return false; +} + static void intel_pmu_disable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -881,6 +894,13 @@ static void intel_pmu_disable_event(struct perf_event *event) cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); + /* + * must disable before any actual event + * because any event may be combined with LBR + */ + if (intel_pmu_needs_lbr_smpl(event)) + intel_pmu_lbr_disable(event); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { intel_pmu_disable_fixed(hwc); return; @@ -935,6 +955,12 @@ static void intel_pmu_enable_event(struct perf_event *event) intel_pmu_enable_bts(hwc->config); return; } + /* + * must enabled before any actual event + * because any event may be combined with LBR + */ + if (intel_pmu_needs_lbr_smpl(event)) + intel_pmu_lbr_enable(event); if (event->attr.exclude_host) cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); @@ -1057,6 +1083,9 @@ again: data.period = event->hw.last_period; + if (has_branch_stack(event)) + data.br_stack = &cpuc->lbr_stack; + if (perf_event_overflow(event,
&data, regs)) x86_pmu_stop(event, 0); } @@ -1305,6 +1334,12 @@ static int intel_pmu_hw_config(struct perf_event *event) event->hw.config = alt_config; } + if (intel_pmu_needs_lbr_smpl(event)) { + ret = intel_pmu_setup_lbr_filter(event); + if (ret) + return ret; + } + if (event->attr.type != PERF_TYPE_RAW) return 0; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index d6bd49f..ee7e3c8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -439,9 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event) hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; cpuc->pebs_enabled |= 1ULL << hwc->idx; - - if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) - intel_pmu_lbr_enable(event); } void intel_pmu_pebs_disable(struct perf_event *event) @@ -454,9 +451,6 @@ void intel_pmu_pebs_disable(struct perf_event *event) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); hwc->config |= ARCH_PERFMON_EVENTSEL_INT; - - if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) - intel_pmu_lbr_disable(event); } void intel_pmu_pebs_enable_all(void) @@ -572,6 +566,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, * both formats and we don't use the other fields in this * routine. */ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct pebs_record_core *pebs = __pebs; struct perf_sample_data data; struct pt_regs regs; @@ -602,6 +597,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event, else regs.flags &= ~PERF_EFLAGS_EXACT; + if (has_branch_stack(event)) + data.br_stack = &cpuc->lbr_stack; + if (perf_event_overflow(event, &data, ®s)) x86_pmu_stop(event, 0); } diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 07f0ff8..d0fb864 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -56,6 +56,10 @@ enum { #define LBR_FROM_FLAG_MISPRED (1ULL << 63) +#define for_each_branch_sample_type(x) \ + for ((x) = PERF_SAMPLE_BRANCH_USER; \ + (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1) + /* * We only support LBR implementations that have FREEZE_LBRS_ON_PMI * otherwise it becomes near impossible to get a reliable stack. @@ -64,6 +68,10 @@ enum { static void __intel_pmu_lbr_enable(void) { u64 debugctl; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->lbr_sel) + wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); @@ -119,7 +127,6 @@ void intel_pmu_lbr_enable(struct perf_event *event) * Reset the LBR stack if we changed task context to * avoid data leaks. 
*/ - if (event->ctx->task && cpuc->lbr_context != event->ctx) { intel_pmu_lbr_reset(); cpuc->lbr_context = event->ctx; } @@ -138,8 +145,11 @@ void intel_pmu_lbr_disable(struct perf_event *event) cpuc->lbr_users--; WARN_ON_ONCE(cpuc->lbr_users < 0); - if (cpuc->enabled && !cpuc->lbr_users) + if (cpuc->enabled && !cpuc->lbr_users) { __intel_pmu_lbr_disable(); + /* avoid stale pointer */ + cpuc->lbr_context = NULL; + } } void intel_pmu_lbr_enable_all(void) @@ -158,6 +168,9 @@ void intel_pmu_lbr_disable_all(void) __intel_pmu_lbr_disable(); } +/* + * TOS = most recently recorded branch + */ static inline u64 intel_pmu_lbr_tos(void) { u64 tos; @@ -242,6 +255,75 @@ void intel_pmu_lbr_read(void) } /* + * setup the HW LBR filter + * Used only when available, may not be enough to disambiguate + * all branches, may need the help of the SW filter + */ +static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) +{ + struct hw_perf_event_extra *reg; + u64 br_type = event->attr.branch_sample_type; + u64 mask = 0, m; + u64 v; + + for_each_branch_sample_type(m) { + if (!(br_type & m)) + continue; + + v = x86_pmu.lbr_sel_map[m]; + if (v == LBR_NOT_SUPP) + return -EOPNOTSUPP; + mask |= v; + + if (m == PERF_SAMPLE_BRANCH_ANY) + break; + } + reg = &event->hw.branch_reg; + reg->idx = EXTRA_REG_LBR; + + /* LBR_SELECT operates in suppress mode so invert mask */ + reg->config = ~mask & x86_pmu.lbr_sel_mask; + + return 0; +} + +/* + * all the bits supported on some flavor of x86LBR + * we ignore BRANCH_HV because it is not supported + */ +#define PERF_SAMPLE_BRANCH_X86_ALL \ + (PERF_SAMPLE_BRANCH_ANY |\ + PERF_SAMPLE_BRANCH_USER |\ + PERF_SAMPLE_BRANCH_KERNEL) + +int intel_pmu_setup_lbr_filter(struct perf_event *event) +{ + u64 br_type = event->attr.branch_sample_type; + + /* + * no LBR on this PMU + */ + if (!x86_pmu.lbr_nr) + return -EOPNOTSUPP; + + /* + * if no LBR HW filter, users can only + * capture all branches + */ + if (!x86_pmu.lbr_sel_map) { + if (br_type != PERF_SAMPLE_BRANCH_X86_ALL) + return -EOPNOTSUPP; + return 0; + } + /* + * we ignore branch priv levels we do not + * know about: BRANCH_HV + */ + + return intel_pmu_setup_hw_lbr_filter(event); +} + +/* * Map interface branch filters onto LBR filters */ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { -- cgit v0.10.2 From 3e702ff6d1ea12dcf1c798ecb61e7f3a1579df42 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:58 +0100 Subject: perf/x86: Add LBR software filter support for Intel CPUs This patch adds an internal software filter to complement the (optional) LBR hardware filter. The software filter is necessary: - as a substitute when there is no HW LBR filter (e.g., Atom, Core) - to complement HW LBR filter in case of errata (e.g., Nehalem/Westmere) - to provide finer grain filtering (e.g., all processors) Sometimes the LBR HW filter cannot distinguish between two types of branches. For instance, to capture syscall as CALLS, it is necessary to enable the LBR_FAR filter which will also capture JMP instructions. Thus, a second pass is necessary to filter those out; this is what the SW filter can do. The SW filter is built on top of the internal x86 disassembler. It is a best-effort filter, especially for user-level code. It is subject to the availability of the text page of the program. The SW filter is enabled on all Intel processors. It is bypassed when the user is capturing all branches at all priv levels.
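From user space, the combined HW+SW filter is driven entirely through perf_event_attr.branch_sample_type. The sketch below is a minimal illustration, assuming a kernel with this series applied; the perf_event_open() wrapper is spelled out because glibc provides none. On a PMU without a usable LBR, or when a filter request cannot be satisfied, the syscall fails with EOPNOTSUPP.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	/* user-level call branches only: the HW filter is programmed
	   where the PMU supports it, the SW filter prunes the rest */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
				  PERF_SAMPLE_BRANCH_ANY_CALL;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");	/* e.g. EOPNOTSUPP: no usable LBR */
		return 1;
	}
	close(fd);
	return 0;
}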
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-9-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index ef7419c..f104c05 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -132,6 +132,7 @@ struct cpu_hw_events { struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; struct er_account *lbr_sel; + u64 br_sel; /* * Intel host/guest exclude bits @@ -459,6 +460,15 @@ extern struct event_constraint emptyconstraint; extern struct event_constraint unconstrained; +static inline bool kernel_ip(unsigned long ip) +{ +#ifdef CONFIG_X86_32 + return ip > PAGE_OFFSET; +#else + return (long)ip < 0; +#endif +} + #ifdef CONFIG_CPU_SUP_AMD int amd_pmu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index ee7e3c8..7f64df1 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -3,6 +3,7 @@ #include #include +#include #include "perf_event.h" @@ -469,17 +470,6 @@ void intel_pmu_pebs_disable_all(void) wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } -#include - -static inline bool kernel_ip(unsigned long ip) -{ -#ifdef CONFIG_X86_32 - return ip > PAGE_OFFSET; -#else - return (long)ip < 0; -#endif -} - static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index d0fb864..520b426 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -3,6 +3,7 @@ #include #include +#include #include "perf_event.h" @@ -61,6 +62,53 @@ enum { (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1) /* + * x86control flow change classification + * x86control flow changes include branches, interrupts, traps, faults + */ +enum { + X86_BR_NONE = 0, /* unknown */ + + X86_BR_USER = 1 << 0, /* branch target is user */ + X86_BR_KERNEL = 1 << 1, /* branch target is kernel */ + + X86_BR_CALL = 1 << 2, /* call */ + X86_BR_RET = 1 << 3, /* return */ + X86_BR_SYSCALL = 1 << 4, /* syscall */ + X86_BR_SYSRET = 1 << 5, /* syscall return */ + X86_BR_INT = 1 << 6, /* sw interrupt */ + X86_BR_IRET = 1 << 7, /* return from interrupt */ + X86_BR_JCC = 1 << 8, /* conditional */ + X86_BR_JMP = 1 << 9, /* jump */ + X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ + X86_BR_IND_CALL = 1 << 11,/* indirect calls */ +}; + +#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) + +#define X86_BR_ANY \ + (X86_BR_CALL |\ + X86_BR_RET |\ + X86_BR_SYSCALL |\ + X86_BR_SYSRET |\ + X86_BR_INT |\ + X86_BR_IRET |\ + X86_BR_JCC |\ + X86_BR_JMP |\ + X86_BR_IRQ |\ + X86_BR_IND_CALL) + +#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) + +#define X86_BR_ANY_CALL \ + (X86_BR_CALL |\ + X86_BR_IND_CALL |\ + X86_BR_SYSCALL |\ + X86_BR_IRQ |\ + X86_BR_INT) + +static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); + +/* * We only support LBR implementations that have FREEZE_LBRS_ON_PMI * otherwise it becomes near impossible to get a reliable stack. 
*/ @@ -131,6 +179,7 @@ void intel_pmu_lbr_enable(struct perf_event *event) intel_pmu_lbr_reset(); cpuc->lbr_context = event->ctx; } + cpuc->br_sel = event->hw.branch_reg.reg; cpuc->lbr_users++; } @@ -252,6 +301,44 @@ void intel_pmu_lbr_read(void) intel_pmu_lbr_read_32(cpuc); else intel_pmu_lbr_read_64(cpuc); + + intel_pmu_lbr_filter(cpuc); +} + +/* + * SW filter is used: + * - in case there is no HW filter + * - in case the HW filter has errata or limitations + */ +static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) +{ + u64 br_type = event->attr.branch_sample_type; + int mask = 0; + + if (br_type & PERF_SAMPLE_BRANCH_USER) + mask |= X86_BR_USER; + + if (br_type & PERF_SAMPLE_BRANCH_KERNEL) + mask |= X86_BR_KERNEL; + + /* we ignore BRANCH_HV here */ + + if (br_type & PERF_SAMPLE_BRANCH_ANY) + mask |= X86_BR_ANY; + + if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL) + mask |= X86_BR_ANY_CALL; + + if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN) + mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET; + + if (br_type & PERF_SAMPLE_BRANCH_IND_CALL) + mask |= X86_BR_IND_CALL; + /* + * stash actual user request into reg, it may + * be used by fixup code for some CPU + */ + event->hw.branch_reg.reg = mask; } /* @@ -273,10 +360,9 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) v = x86_pmu.lbr_sel_map[m]; if (v == LBR_NOT_SUPP) return -EOPNOTSUPP; - mask |= v; - if (m == PERF_SAMPLE_BRANCH_ANY) - break; + if (v != LBR_IGN) + mask |= v; } reg = &event->hw.branch_reg; reg->idx = EXTRA_REG_LBR; @@ -287,18 +373,9 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) return 0; } -/* - * all the bits supported on some flavor of x86LBR - * we ignore BRANCH_HV because it is not supported - */ -#define PERF_SAMPLE_BRANCH_X86_ALL \ - (PERF_SAMPLE_BRANCH_ANY |\ - PERF_SAMPLE_BRANCH_USER |\ - PERF_SAMPLE_BRANCH_KERNEL) - int intel_pmu_setup_lbr_filter(struct perf_event *event) { - u64 br_type = event->attr.branch_sample_type; + int ret = 0; /* * no LBR on this PMU @@ -307,20 +384,210 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event) return -EOPNOTSUPP; /* - * if no LBR HW filter, users can only - * capture all branches + * setup SW LBR filter */ - if (!x86_pmu.lbr_sel_map) { - if (br_type != PERF_SAMPLE_BRANCH_X86_ALL) - return -EOPNOTSUPP; - return 0; + intel_pmu_setup_sw_lbr_filter(event); + + /* + * setup HW LBR filter, if any + */ + if (x86_pmu.lbr_sel_map) + ret = intel_pmu_setup_hw_lbr_filter(event); + + return ret; +} + +/* + * return the type of control flow change at address "from" + * intruction is not necessarily a branch (in case of interrupt). + * + * The branch type returned also includes the priv level of the + * target of the control flow change (X86_BR_USER, X86_BR_KERNEL). + * + * If a branch type is unknown OR the instruction cannot be + * decoded (e.g., text page not present), then X86_BR_NONE is + * returned. + */ +static int branch_type(unsigned long from, unsigned long to) +{ + struct insn insn; + void *addr; + int bytes, size = MAX_INSN_SIZE; + int ret = X86_BR_NONE; + int ext, to_plm, from_plm; + u8 buf[MAX_INSN_SIZE]; + int is64 = 0; + + to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER; + from_plm = kernel_ip(from) ? 
X86_BR_KERNEL : X86_BR_USER; + + /* + * maybe zero if lbr did not fill up after a reset by the time + * we get a PMU interrupt + */ + if (from == 0 || to == 0) + return X86_BR_NONE; + + if (from_plm == X86_BR_USER) { + /* + * can happen if measuring at the user level only + * and we interrupt in a kernel thread, e.g., idle. + */ + if (!current->mm) + return X86_BR_NONE; + + /* may fail if text not present */ + bytes = copy_from_user_nmi(buf, (void __user *)from, size); + if (bytes != size) + return X86_BR_NONE; + + addr = buf; + } else + addr = (void *)from; + + /* + * decoder needs to know the ABI especially + * on 64-bit systems running 32-bit apps + */ +#ifdef CONFIG_X86_64 + is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32); +#endif + insn_init(&insn, addr, is64); + insn_get_opcode(&insn); + + switch (insn.opcode.bytes[0]) { + case 0xf: + switch (insn.opcode.bytes[1]) { + case 0x05: /* syscall */ + case 0x34: /* sysenter */ + ret = X86_BR_SYSCALL; + break; + case 0x07: /* sysret */ + case 0x35: /* sysexit */ + ret = X86_BR_SYSRET; + break; + case 0x80 ... 0x8f: /* conditional */ + ret = X86_BR_JCC; + break; + default: + ret = X86_BR_NONE; + } + break; + case 0x70 ... 0x7f: /* conditional */ + ret = X86_BR_JCC; + break; + case 0xc2: /* near ret */ + case 0xc3: /* near ret */ + case 0xca: /* far ret */ + case 0xcb: /* far ret */ + ret = X86_BR_RET; + break; + case 0xcf: /* iret */ + ret = X86_BR_IRET; + break; + case 0xcc ... 0xce: /* int */ + ret = X86_BR_INT; + break; + case 0xe8: /* call near rel */ + case 0x9a: /* call far absolute */ + ret = X86_BR_CALL; + break; + case 0xe0 ... 0xe3: /* loop jmp */ + ret = X86_BR_JCC; + break; + case 0xe9 ... 0xeb: /* jmp */ + ret = X86_BR_JMP; + break; + case 0xff: /* call near absolute, call far absolute ind */ + insn_get_modrm(&insn); + ext = (insn.modrm.bytes[0] >> 3) & 0x7; + switch (ext) { + case 2: /* near ind call */ + case 3: /* far ind call */ + ret = X86_BR_IND_CALL; + break; + case 4: + case 5: + ret = X86_BR_JMP; + break; + } + break; + default: + ret = X86_BR_NONE; } /* - * we ignore branch priv levels we do not - * know about: BRANCH_HV + * interrupts, traps, faults (and thus ring transition) may + * occur on any instructions. Thus, to classify them correctly, + * we need to first look at the from and to priv levels. If they + * are different and to is in the kernel, then it indicates + * a ring transition. If the from instruction is not a ring + * transition instr (syscall, systenter, int), then it means + * it was a irq, trap or fault. + * + * we have no way of detecting kernel to kernel faults. + */ + if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL + && ret != X86_BR_SYSCALL && ret != X86_BR_INT) + ret = X86_BR_IRQ; + + /* + * branch priv level determined by target as + * is done by HW when LBR_SELECT is implemented */ + if (ret != X86_BR_NONE) + ret |= to_plm; - return intel_pmu_setup_hw_lbr_filter(event); + return ret; +} + +/* + * implement actual branch filter based on user demand. + * Hardware may not exactly satisfy that request, thus + * we need to inspect opcodes. Mismatched branches are + * discarded. Therefore, the number of branches returned + * in PERF_SAMPLE_BRANCH_STACK sample may vary. 
+ */ +static void +intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) +{ + u64 from, to; + int br_sel = cpuc->br_sel; + int i, j, type; + bool compress = false; + + /* if sampling all branches, then nothing to filter */ + if ((br_sel & X86_BR_ALL) == X86_BR_ALL) + return; + + for (i = 0; i < cpuc->lbr_stack.nr; i++) { + + from = cpuc->lbr_entries[i].from; + to = cpuc->lbr_entries[i].to; + + type = branch_type(from, to); + + /* if type does not correspond, then discard */ + if (type == X86_BR_NONE || (br_sel & type) != type) { + cpuc->lbr_entries[i].from = 0; + compress = true; + } + } + + if (!compress) + return; + + /* remove all entries with from=0 */ + for (i = 0; i < cpuc->lbr_stack.nr; ) { + if (!cpuc->lbr_entries[i].from) { + j = i; + while (++j < cpuc->lbr_stack.nr) + cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; + cpuc->lbr_stack.nr--; + if (!cpuc->lbr_entries[i].from) + continue; + } + i++; + } } /* @@ -363,6 +630,10 @@ void intel_pmu_lbr_init_core(void) x86_pmu.lbr_from = MSR_LBR_CORE_FROM; x86_pmu.lbr_to = MSR_LBR_CORE_TO; + /* + * SW branch filter usage: + * - compensate for lack of HW filter + */ pr_cont("4-deep LBR, "); } @@ -377,6 +648,13 @@ void intel_pmu_lbr_init_nhm(void) x86_pmu.lbr_sel_mask = LBR_SEL_MASK; x86_pmu.lbr_sel_map = nhm_lbr_sel_map; + /* + * SW branch filter usage: + * - workaround LBR_SEL errata (see above) + * - support syscall, sysret capture. + * That requires LBR_FAR but that means far + * jmp need to be filtered out + */ pr_cont("16-deep LBR, "); } @@ -391,6 +669,12 @@ void intel_pmu_lbr_init_snb(void) x86_pmu.lbr_sel_mask = LBR_SEL_MASK; x86_pmu.lbr_sel_map = snb_lbr_sel_map; + /* + * SW branch filter usage: + * - support syscall, sysret capture. + * That requires LBR_FAR but that means far + * jmp need to be filtered out + */ pr_cont("16-deep LBR, "); } @@ -412,5 +696,9 @@ void intel_pmu_lbr_init_atom(void) x86_pmu.lbr_from = MSR_LBR_CORE_FROM; x86_pmu.lbr_to = MSR_LBR_CORE_TO; + /* + * SW branch filter usage: + * - compensate for lack of HW filter + */ pr_cont("8-deep LBR, "); } -- cgit v0.10.2 From 2481c5fa6db0237e4f0168f88913178b2b495b7c Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:20:59 +0100 Subject: perf: Disable PERF_SAMPLE_BRANCH_* when not supported PERF_SAMPLE_BRANCH_* is disabled for: - SW events (sw counters, tracepoints) - HW breakpoints - ALL but Intel x86 architecture - AMD64 processors Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-10-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 8143cd7..0dae252 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -685,6 +685,10 @@ static int alpha_pmu_event_init(struct perf_event *event) { int err; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 5bb91bf..a23c42a 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -539,6 +539,10 @@ static int armpmu_event_init(struct perf_event *event) int err = 0; atomic_t *active_events = &armpmu->active_events; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + if (armpmu->map_event(event) == -ENOENT) return -ENOENT; diff --git a/arch/mips/kernel/perf_event_mipsxx.c 
b/arch/mips/kernel/perf_event_mipsxx.c index e3b897a..811084f 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -606,6 +606,10 @@ static int mipspmu_event_init(struct perf_event *event) { int err = 0; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index f04c230..c2e27ed 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1084,6 +1084,10 @@ static int power_pmu_event_init(struct perf_event *event) if (!ppmu) return -ENOENT; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 10b14e3..068b8a2 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -310,6 +310,10 @@ static int sh_pmu_event_init(struct perf_event *event) { int err; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HW_CACHE: diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 614da62..8e16a4a 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1105,6 +1105,10 @@ static int sparc_pmu_event_init(struct perf_event *event) if (atomic_read(&nmi_active) < 0) return -ENODEV; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (attr->type) { case PERF_TYPE_HARDWARE: if (attr->config >= sparc_pmu->max_events) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 67250a5..dd002fa 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -139,6 +139,9 @@ static int amd_pmu_hw_config(struct perf_event *event) if (ret) return ret; + if (has_branch_stack(event)) + return -EOPNOTSUPP; + if (event->attr.exclude_host && event->attr.exclude_guest) /* * When HO == GO == 1 the hardware treats that as GO == HO == 0 diff --git a/kernel/events/core.c b/kernel/events/core.c index 5820efd..242bb51 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5044,6 +5044,12 @@ static int perf_swevent_init(struct perf_event *event) if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; + /* + * no branch sampling for software events + */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (event_id) { case PERF_COUNT_SW_CPU_CLOCK: case PERF_COUNT_SW_TASK_CLOCK: @@ -5154,6 +5160,12 @@ static int perf_tp_event_init(struct perf_event *event) if (event->attr.type != PERF_TYPE_TRACEPOINT) return -ENOENT; + /* + * no branch sampling for tracepoint events + */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + err = perf_trace_init(event); if (err) return err; @@ -5379,6 +5391,12 @@ static int cpu_clock_event_init(struct perf_event *event) if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) return -ENOENT; + /* + * no branch sampling for software events + */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + perf_swevent_init_hrtimer(event); return 0; @@ -5453,6 +5471,12 @@ static int task_clock_event_init(struct perf_event *event) if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) return -ENOENT; + /* + * no branch 
sampling for software events + */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + perf_swevent_init_hrtimer(event); return 0; diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 3330022..bb38c4d 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -581,6 +581,12 @@ static int hw_breakpoint_event_init(struct perf_event *bp) if (bp->attr.type != PERF_TYPE_BREAKPOINT) return -ENOENT; + /* + * no branch sampling for breakpoint events + */ + if (has_branch_stack(bp)) + return -EOPNOTSUPP; + err = register_perf_hw_breakpoint(bp); if (err) return err; -- cgit v0.10.2 From d010b3326cf06b3406cdd88af16dcf4e4b6fec2e Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:21:00 +0100 Subject: perf: Add callback to flush branch_stack on context switch With branch stack sampling, it is possible to filter by priv levels. In system-wide mode, that means it is possible to capture only user level branches. The builtin SW LBR filter needs to disassemble code based on LBR captured addresses. For that, it needs to know the task the addresses are associated with. Because of context switches, the content of the branch stack buffer may contain addresses from different tasks. We need a callback on context switch to either flush the branch stack or save it. This patch adds a new callback in struct pmu which is called during context switches. The callback is called only when necessary. That is when a system-wide context has, at least, one event which uses PERF_SAMPLE_BRANCH_STACK. The callback is never called for per-thread context. In this version, the Intel x86 code simply flushes (resets) the LBR on context switches (fills it with zeroes). Those zeroed branches are then filtered out by the SW filter. 
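Because a reset fills the LBR MSRs with zeroes, stale slots read back as from/to pairs of 0 and are dropped when the SW filter compresses the stack. The freestanding sketch below shows only the compaction idea; the kernel performs the equivalent in place on cpuc->lbr_entries.

#include <stdio.h>

struct branch_entry {
	unsigned long long from, to;
};

/* drop entries zeroed by a context-switch flush, preserving order */
static int compress_branch_stack(struct branch_entry *e, int nr)
{
	int i, j = 0;

	for (i = 0; i < nr; i++) {
		if (!e[i].from || !e[i].to)
			continue;	/* zeroed slot, skip it */
		e[j++] = e[i];
	}
	return j;			/* number of live entries */
}

int main(void)
{
	struct branch_entry stack[4] = {
		{ 0x400100, 0x400200 },
		{ 0, 0 },		/* flushed on ctxsw */
		{ 0x400300, 0x400400 },
		{ 0, 0 },		/* flushed on ctxsw */
	};

	printf("%d live entries\n", compress_branch_stack(stack, 4));
	return 0;
}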
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328826068-11713-11-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index cea5674..0a18d16 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1671,25 +1671,32 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { NULL, }; +static void x86_pmu_flush_branch_stack(void) +{ + if (x86_pmu.flush_branch_stack) + x86_pmu.flush_branch_stack(); +} + static struct pmu pmu = { - .pmu_enable = x86_pmu_enable, - .pmu_disable = x86_pmu_disable, + .pmu_enable = x86_pmu_enable, + .pmu_disable = x86_pmu_disable, .attr_groups = x86_pmu_attr_groups, .event_init = x86_pmu_event_init, - .add = x86_pmu_add, - .del = x86_pmu_del, - .start = x86_pmu_start, - .stop = x86_pmu_stop, - .read = x86_pmu_read, + .add = x86_pmu_add, + .del = x86_pmu_del, + .start = x86_pmu_start, + .stop = x86_pmu_stop, + .read = x86_pmu_read, .start_txn = x86_pmu_start_txn, .cancel_txn = x86_pmu_cancel_txn, .commit_txn = x86_pmu_commit_txn, .event_idx = x86_pmu_event_idx, + .flush_branch_stack = x86_pmu_flush_branch_stack, }; void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index f104c05..74387c1 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -324,6 +324,7 @@ struct x86_pmu { void (*cpu_starting)(int cpu); void (*cpu_dying)(int cpu); void (*cpu_dead)(int cpu); + void (*flush_branch_stack)(void); /* * Intel Arch Perfmon v2+ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 7cc1e2d..6627089 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1539,6 +1539,18 @@ static void intel_pmu_cpu_dying(int cpu) fini_debug_store_on_cpu(cpu); } +static void intel_pmu_flush_branch_stack(void) +{ + /* + * Intel LBR does not tag entries with the + * PID of the current task, then we need to + * flush it on ctxsw + * For now, we simply reset it + */ + if (x86_pmu.lbr_nr) + intel_pmu_lbr_reset(); +} + static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -1566,6 +1578,7 @@ static __initconst const struct x86_pmu intel_pmu = { .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, .guest_get_msrs = intel_guest_get_msrs, + .flush_branch_stack = intel_pmu_flush_branch_stack, }; static __init void intel_clovertown_quirk(void) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 5fc494f..fbbf5e5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -746,6 +746,11 @@ struct pmu { * if no implementation is provided it will default to: event->hw.idx + 1. 
*/ int (*event_idx) (struct perf_event *event); /*optional */ + + /* + * flush branch stack on context-switches (needed in cpu-wide mode) + */ + void (*flush_branch_stack) (void); }; /** @@ -979,7 +984,8 @@ struct perf_event_context { u64 parent_gen; u64 generation; int pin_count; - int nr_cgroups; /* cgroup events present */ + int nr_cgroups; /* cgroup evts */ + int nr_branch_stack; /* branch_stack evt */ struct rcu_head rcu_head; }; @@ -1044,6 +1050,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); + struct perf_sample_data { u64 type; diff --git a/kernel/events/core.c b/kernel/events/core.c index 242bb51..c61234b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -137,6 +137,7 @@ enum event_type_t { */ struct static_key_deferred perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); +static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events); static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; @@ -888,6 +889,9 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) if (is_cgroup_event(event)) ctx->nr_cgroups++; + if (has_branch_stack(event)) + ctx->nr_branch_stack++; + list_add_rcu(&event->event_entry, &ctx->event_list); if (!ctx->nr_events) perf_pmu_rotate_start(ctx->pmu); @@ -1027,6 +1031,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) cpuctx->cgrp = NULL; } + if (has_branch_stack(event)) + ctx->nr_branch_stack--; + ctx->nr_events--; if (event->attr.inherit_stat) ctx->nr_stat--; @@ -2202,6 +2209,66 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, } /* + * When sampling the branck stack in system-wide, it may be necessary + * to flush the stack on context switch. This happens when the branch + * stack does not tag its entries with the pid of the current task. + * Otherwise it becomes impossible to associate a branch entry with a + * task. This ambiguity is more likely to appear when the branch stack + * supports priv level filtering and the user sets it to monitor only + * at the user level (which could be a useful measurement in system-wide + * mode). In that case, the risk is high of having a branch stack with + * branch from multiple tasks. Flushing may mean dropping the existing + * entries or stashing them somewhere in the PMU specific code layer. + * + * This function provides the context switch callback to the lower code + * layer. It is invoked ONLY when there is at least one system-wide context + * with at least one active event using taken branch sampling. 
+ */ +static void perf_branch_stack_sched_in(struct task_struct *prev, + struct task_struct *task) +{ + struct perf_cpu_context *cpuctx; + struct pmu *pmu; + unsigned long flags; + + /* no need to flush branch stack if not changing task */ + if (prev == task) + return; + + local_irq_save(flags); + + rcu_read_lock(); + + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + + /* + * check if the context has at least one + * event using PERF_SAMPLE_BRANCH_STACK + */ + if (cpuctx->ctx.nr_branch_stack > 0 + && pmu->flush_branch_stack) { + + pmu = cpuctx->ctx.pmu; + + perf_ctx_lock(cpuctx, cpuctx->task_ctx); + + perf_pmu_disable(pmu); + + pmu->flush_branch_stack(); + + perf_pmu_enable(pmu); + + perf_ctx_unlock(cpuctx, cpuctx->task_ctx); + } + } + + rcu_read_unlock(); + + local_irq_restore(flags); +} + +/* * Called from scheduler to add the events of the current task * with interrupts disabled. * @@ -2232,6 +2299,10 @@ void __perf_event_task_sched_in(struct task_struct *prev, */ if (atomic_read(&__get_cpu_var(perf_cgroup_events))) perf_cgroup_sched_in(prev, task); + + /* check for system-wide branch_stack events */ + if (atomic_read(&__get_cpu_var(perf_branch_stack_events))) + perf_branch_stack_sched_in(prev, task); } static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) @@ -2798,6 +2869,14 @@ static void free_event(struct perf_event *event) atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); static_key_slow_dec_deferred(&perf_sched_events); } + + if (has_branch_stack(event)) { + static_key_slow_dec_deferred(&perf_sched_events); + /* is system-wide event */ + if (!(event->attach_state & PERF_ATTACH_TASK)) + atomic_dec(&per_cpu(perf_branch_stack_events, + event->cpu)); + } } if (event->rb) { @@ -5924,6 +6003,12 @@ done: return ERR_PTR(err); } } + if (has_branch_stack(event)) { + static_key_slow_inc(&perf_sched_events.key); + if (!(event->attach_state & PERF_ATTACH_TASK)) + atomic_inc(&per_cpu(perf_branch_stack_events, + event->cpu)); + } } return event; -- cgit v0.10.2 From 86b4ce3156c0dc140907ad03639564000cde694f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 5 Mar 2012 22:32:09 +0900 Subject: x86/kprobes: Fix instruction recovery on optimized path Current probed-instruction recovery expects that only the breakpoint instruction modifies the probed instruction. However, since kprobes jump optimization can replace original instructions with a jump, that expectation is not enough. It may cause instruction decoding to fail on a function where an optimized probe already exists. This bug can be reproduced easily as below: 1) find a target function address (any kprobe-able function is OK) $ grep __secure_computing /proc/kallsyms ffffffff810c19d0 T __secure_computing 2) decode the function $ objdump -d vmlinux --start-address=0xffffffff810c19d0 --stop-address=0xffffffff810c19eb vmlinux: file format elf64-x86-64 Disassembly of section .text: ffffffff810c19d0 <__secure_computing>: ffffffff810c19d0: 55 push %rbp ffffffff810c19d1: 48 89 e5 mov %rsp,%rbp ffffffff810c19d4: e8 67 8f 72 00 callq ffffffff817ea940 ffffffff810c19d9: 65 48 8b 04 25 40 b8 mov %gs:0xb840,%rax ffffffff810c19e0: 00 00 ffffffff810c19e2: 83 b8 88 05 00 00 01 cmpl $0x1,0x588(%rax) ffffffff810c19e9: 74 05 je ffffffff810c19f0 <__secure_computing+0x20> 3) put a kprobe-event at an optimizable place, where no call/jump instruction lies within the 5 bytes. $ su - # cd /sys/kernel/debug/tracing # echo p __secure_computing+0x9 > kprobe_events 4) enable it and check it is optimized.
# echo 1 > events/kprobes/p___secure_computing_9/enable # cat ../kprobes/list ffffffff810c19d9 k __secure_computing+0x9 [OPTIMIZED] 5) put another kprobe on an instruction after previous probe in the same function. # echo p __secure_computing+0x12 >> kprobe_events bash: echo: write error: Invalid argument # dmesg | tail -n 1 [ 1666.500016] Probing address(0xffffffff810c19e2) is not an instruction boundary. 6) however, if the kprobes optimization is disabled, it works. # echo 0 > /proc/sys/debug/kprobes-optimization # cat ../kprobes/list ffffffff810c19d9 k __secure_computing+0x9 # echo p __secure_computing+0x12 >> kprobe_events (no error) This is because kprobes doesn't recover the instruction which is overwritten with a relative jump by another kprobe when finding instruction boundary. It only recovers the breakpoint instruction. This patch fixes kprobes to recover such instructions. With this fix: # echo p __secure_computing+0x9 > kprobe_events # echo 1 > events/kprobes/p___secure_computing_9/enable # cat ../kprobes/list ffffffff810c1aa9 k __secure_computing+0x9 [OPTIMIZED] # echo p __secure_computing+0x12 >> kprobe_events # cat ../kprobes/list ffffffff810c1aa9 k __secure_computing+0x9 [OPTIMIZED] ffffffff810c1ab2 k __secure_computing+0x12 [DISABLED] Changes in v4: - Fix a bug to ensure optimized probe is really optimized by jump. - Remove kprobe_optready() dependency. - Cleanup code for preparing optprobe separation. Changes in v3: - Fix a build error when CONFIG_OPTPROBE=n. (Thanks, Ingo!) To fix the error, split optprobe instruction recovering path from kprobes path. - Cleanup comments/styles. Changes in v2: - Fix a bug to recover original instruction address in RIP-relative instruction fixup. - Moved on tip/master. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: yrl.pp-manager.tt@hitachi.com Cc: systemtap@sourceware.org Cc: anderson@redhat.com Link: http://lkml.kernel.org/r/20120305133209.5982.36568.stgit@localhost.localdomain Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7da647d..6bec22f 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -207,13 +207,15 @@ retry: } } -/* Recover the probed instruction at addr for further analysis. */ -static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) +static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, + unsigned long addr) { struct kprobe *kp; + kp = get_kprobe((void *)addr); + /* There is no probe, return original address */ if (!kp) - return -EINVAL; + return addr; /* * Basically, kp->ainsn.insn has an original instruction. 
@@ -230,14 +232,76 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) */ memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); buf[0] = kp->opcode; - return 0; + return (unsigned long)buf; +} + +#ifdef CONFIG_OPTPROBES +static unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, + unsigned long addr) +{ + struct optimized_kprobe *op; + struct kprobe *kp; + long offs; + int i; + + for (i = 0; i < RELATIVEJUMP_SIZE; i++) { + kp = get_kprobe((void *)addr - i); + /* This function only handles jump-optimized kprobe */ + if (kp && kprobe_optimized(kp)) { + op = container_of(kp, struct optimized_kprobe, kp); + /* If op->list is not empty, op is under optimizing */ + if (list_empty(&op->list)) + goto found; + } + } + + return addr; +found: + /* + * If the kprobe can be optimized, original bytes which can be + * overwritten by jump destination address. In this case, original + * bytes must be recovered from op->optinsn.copied_insn buffer. + */ + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + if (addr == (unsigned long)kp->addr) { + buf[0] = kp->opcode; + memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + } else { + offs = addr - (unsigned long)kp->addr - 1; + memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); + } + + return (unsigned long)buf; +} +#else +static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, + unsigned long addr) +{ + return addr; +} +#endif + +/* + * Recover the probed instruction at addr for further analysis. + * Caller must lock kprobes by kprobe_mutex, or disable preemption + * for preventing to release referencing kprobes. + */ +static unsigned long recover_probed_instruction(kprobe_opcode_t *buf, + unsigned long addr) +{ + unsigned long __addr; + + __addr = __recover_optprobed_insn(buf, addr); + if (__addr != addr) + return __addr; + + return __recover_probed_insn(buf, addr); } /* Check if paddr is at an instruction boundary */ static int __kprobes can_probe(unsigned long paddr) { - int ret; - unsigned long addr, offset = 0; + unsigned long addr, __addr, offset = 0; struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; @@ -247,26 +311,24 @@ static int __kprobes can_probe(unsigned long paddr) /* Decode instructions */ addr = paddr - offset; while (addr < paddr) { - kernel_insn_init(&insn, (void *)addr); - insn_get_opcode(&insn); - /* * Check if the instruction has been modified by another * kprobe, in which case we replace the breakpoint by the * original instruction in our buffer. + * Also, jump optimization will change the breakpoint to + * relative-jump. Since the relative-jump itself is + * normally used, we just go through if there is no kprobe. */ - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { - ret = recover_probed_instruction(buf, addr); - if (ret) - /* - * Another debugging subsystem might insert - * this breakpoint. In that case, we can't - * recover it. - */ - return 0; - kernel_insn_init(&insn, buf); - } + __addr = recover_probed_instruction(buf, addr); + kernel_insn_init(&insn, (void *)__addr); insn_get_length(&insn); + + /* + * Another debugging subsystem might insert this breakpoint. + * In that case, we can't recover it. 
+ */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; addr += insn.length; } @@ -302,21 +364,17 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) { struct insn insn; - int ret; kprobe_opcode_t buf[MAX_INSN_SIZE]; + u8 *orig_src = src; /* Back up original src for RIP calculation */ + + if (recover) + src = (u8 *)recover_probed_instruction(buf, (unsigned long)src); kernel_insn_init(&insn, src); - if (recover) { - insn_get_opcode(&insn); - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { - ret = recover_probed_instruction(buf, - (unsigned long)src); - if (ret) - return 0; - kernel_insn_init(&insn, buf); - } - } insn_get_length(&insn); + /* Another subsystem puts a breakpoint, failed to recover */ + if (recover && insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; memcpy(dest, insn.kaddr, insn.length); #ifdef CONFIG_X86_64 @@ -337,8 +395,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) * extension of the original signed 32-bit displacement would * have given. */ - newdisp = (u8 *) src + (s64) insn.displacement.value - - (u8 *) dest; + newdisp = (u8 *) orig_src + (s64) insn.displacement.value - (u8 *) dest; BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ disp = (u8 *) dest + insn_offset_displacement(&insn); *(s32 *) disp = (s32) newdisp; @@ -1271,8 +1328,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) /* Decode whole function to ensure any instructions don't jump into target */ static int __kprobes can_optimize(unsigned long paddr) { - int ret; - unsigned long addr, size = 0, offset = 0; + unsigned long addr, __addr, size = 0, offset = 0; struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; @@ -1301,15 +1357,12 @@ static int __kprobes can_optimize(unsigned long paddr) * we can't optimize kprobe in this function. */ return 0; - kernel_insn_init(&insn, (void *)addr); - insn_get_opcode(&insn); - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { - ret = recover_probed_instruction(buf, addr); - if (ret) - return 0; - kernel_insn_init(&insn, buf); - } + __addr = recover_probed_instruction(buf, addr); + kernel_insn_init(&insn, (void *)__addr); insn_get_length(&insn); + /* Another subsystem puts a breakpoint */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); @@ -1366,6 +1419,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) /* * Copy replacing target instructions * Target instructions MUST be relocatable (checked inside) + * This is called when new aggr(opt)probe is allocated or reused. */ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) { -- cgit v0.10.2 From 464846888d9aad186cab3acdae6b654f9eb19772 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 5 Mar 2012 22:32:16 +0900 Subject: x86/kprobes: Fix a bug which can modify kernel code permanently Fix a bug in kprobes which can modify kernel code permanently at run-time. As a result, the kernel can crash when it executes the modified code. This bug can happen when we put two probes close enough together and the first probe is optimized. When the second probe is set up, it copies a byte which is already modified by the first probe, and executes it when the probe is hit.
Even worse, when the first probe and then the second probe are removed, the second probe writes back the copied (modified) instruction. To fix this bug, kprobes always recovers the original code and copies the first byte from the recovered instruction. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: yrl.pp-manager.tt@hitachi.com Cc: systemtap@sourceware.org Cc: anderson@redhat.com Link: http://lkml.kernel.org/r/20120305133215.5982.31991.stgit@localhost.localdomain Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 6bec22f..ca6d450 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -361,19 +361,15 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) * If not, return null. * Only applicable to 64-bit x86. */ -static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) +static int __kprobes __copy_instruction(u8 *dest, u8 *src) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; - u8 *orig_src = src; /* Back up original src for RIP calculation */ - if (recover) - src = (u8 *)recover_probed_instruction(buf, (unsigned long)src); - - kernel_insn_init(&insn, src); + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); insn_get_length(&insn); /* Another subsystem puts a breakpoint, failed to recover */ - if (recover && insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) return 0; memcpy(dest, insn.kaddr, insn.length); @@ -395,7 +391,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) * extension of the original signed 32-bit displacement would * have given. */ - newdisp = (u8 *) orig_src + (s64) insn.displacement.value - (u8 *) dest; + newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ disp = (u8 *) dest + insn_offset_displacement(&insn); *(s32 *) disp = (s32) newdisp; @@ -406,18 +402,20 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) { + /* Copy an instruction with recovering if other optprobe modifies it.*/ + __copy_instruction(p->ainsn.insn, p->addr); + /* - * Copy an instruction without recovering int3, because it will be - * put by another subsystem. + * __copy_instruction can modify the displacement of the instruction, + * but it doesn't affect boostable check.
*/ - __copy_instruction(p->ainsn.insn, p->addr, 0); - - if (can_boost(p->addr)) + if (can_boost(p->ainsn.insn)) p->ainsn.boostable = 0; else p->ainsn.boostable = -1; - p->opcode = *p->addr; + /* Also, displacement change doesn't affect the first byte */ + p->opcode = p->ainsn.insn[0]; } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -1276,7 +1274,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) int len = 0, ret; while (len < RELATIVEJUMP_SIZE) { - ret = __copy_instruction(dest + len, src + len, 1); + ret = __copy_instruction(dest + len, src + len); if (!ret || !can_boost(dest + len)) return -EINVAL; len += ret; @@ -1328,7 +1326,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) /* Decode whole function to ensure any instructions don't jump into target */ static int __kprobes can_optimize(unsigned long paddr) { - unsigned long addr, __addr, size = 0, offset = 0; + unsigned long addr, size = 0, offset = 0; struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; @@ -1357,8 +1355,7 @@ static int __kprobes can_optimize(unsigned long paddr) * we can't optimize kprobe in this function. */ return 0; - __addr = recover_probed_instruction(buf, addr); - kernel_insn_init(&insn, (void *)__addr); + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); insn_get_length(&insn); /* Another subsystem puts a breakpoint */ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) return 0; -- cgit v0.10.2 From 3f33ab1c0c741bfab2138c14ba1918a7905a1e8b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 5 Mar 2012 22:32:22 +0900 Subject: x86/kprobes: Split out optprobe related code to kprobes-opt.c Split out optprobe related code to arch/x86/kernel/kprobes-opt.c for maintainability. Signed-off-by: Masami Hiramatsu Suggested-by: Ingo Molnar Cc: Ananth N Mavinakayanahalli Cc: yrl.pp-manager.tt@hitachi.com Cc: systemtap@sourceware.org Cc: anderson@redhat.com Link: http://lkml.kernel.org/r/20120305133222.5982.54794.stgit@localhost.localdomain [ Tidied up the code a tiny bit ] Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5369059..532d2e0 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_OPTPROBES) += kprobes-opt.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o obj-$(CONFIG_KGDB) += kgdb.o diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h new file mode 100644 index 0000000..3230b68 --- /dev/null +++ b/arch/x86/kernel/kprobes-common.h @@ -0,0 +1,102 @@ +#ifndef __X86_KERNEL_KPROBES_COMMON_H +#define __X86_KERNEL_KPROBES_COMMON_H + +/* Kprobes and Optprobes common header */ +#ifdef CONFIG_X86_64 +#define SAVE_REGS_STRING \ + /* Skip cs, ip, orig_ax.
*/ \ + " subq $24, %rsp\n" \ + " pushq %rdi\n" \ + " pushq %rsi\n" \ + " pushq %rdx\n" \ + " pushq %rcx\n" \ + " pushq %rax\n" \ + " pushq %r8\n" \ + " pushq %r9\n" \ + " pushq %r10\n" \ + " pushq %r11\n" \ + " pushq %rbx\n" \ + " pushq %rbp\n" \ + " pushq %r12\n" \ + " pushq %r13\n" \ + " pushq %r14\n" \ + " pushq %r15\n" +#define RESTORE_REGS_STRING \ + " popq %r15\n" \ + " popq %r14\n" \ + " popq %r13\n" \ + " popq %r12\n" \ + " popq %rbp\n" \ + " popq %rbx\n" \ + " popq %r11\n" \ + " popq %r10\n" \ + " popq %r9\n" \ + " popq %r8\n" \ + " popq %rax\n" \ + " popq %rcx\n" \ + " popq %rdx\n" \ + " popq %rsi\n" \ + " popq %rdi\n" \ + /* Skip orig_ax, ip, cs */ \ + " addq $24, %rsp\n" +#else +#define SAVE_REGS_STRING \ + /* Skip cs, ip, orig_ax and gs. */ \ + " subl $16, %esp\n" \ + " pushl %fs\n" \ + " pushl %es\n" \ + " pushl %ds\n" \ + " pushl %eax\n" \ + " pushl %ebp\n" \ + " pushl %edi\n" \ + " pushl %esi\n" \ + " pushl %edx\n" \ + " pushl %ecx\n" \ + " pushl %ebx\n" +#define RESTORE_REGS_STRING \ + " popl %ebx\n" \ + " popl %ecx\n" \ + " popl %edx\n" \ + " popl %esi\n" \ + " popl %edi\n" \ + " popl %ebp\n" \ + " popl %eax\n" \ + /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ + " addl $24, %esp\n" +#endif + +/* Ensure if the instruction can be boostable */ +extern int can_boost(kprobe_opcode_t *instruction); +/* Recover instruction if given address is probed */ +extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, + unsigned long addr); +/* + * Copy an instruction and adjust the displacement if the instruction + * uses the %rip-relative addressing mode. + */ +extern int __copy_instruction(u8 *dest, u8 *src); + +/* Generate a relative-jump/call instruction */ +extern void synthesize_reljump(void *from, void *to); +extern void synthesize_relcall(void *from, void *to); + +#ifdef CONFIG_OPTPROBES +extern int arch_init_optprobes(void); +extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); +extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); +#else /* !CONFIG_OPTPROBES */ +static inline int arch_init_optprobes(void) +{ + return 0; +} +static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) +{ + return 0; +} +static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) +{ + return addr; +} +#endif +#endif diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c new file mode 100644 index 0000000..c5e410e --- /dev/null +++ b/arch/x86/kernel/kprobes-opt.c @@ -0,0 +1,512 @@ +/* + * Kernel Probes Jump Optimization (Optprobes) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2002, 2004 + * Copyright (C) Hitachi Ltd., 2012 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "kprobes-common.h" + +unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) +{ + struct optimized_kprobe *op; + struct kprobe *kp; + long offs; + int i; + + for (i = 0; i < RELATIVEJUMP_SIZE; i++) { + kp = get_kprobe((void *)addr - i); + /* This function only handles jump-optimized kprobe */ + if (kp && kprobe_optimized(kp)) { + op = container_of(kp, struct optimized_kprobe, kp); + /* If op->list is not empty, op is under optimizing */ + if (list_empty(&op->list)) + goto found; + } + } + + return addr; +found: + /* + * If the kprobe can be optimized, original bytes which can be + * overwritten by jump destination address. In this case, original + * bytes must be recovered from op->optinsn.copied_insn buffer. + */ + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + if (addr == (unsigned long)kp->addr) { + buf[0] = kp->opcode; + memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + } else { + offs = addr - (unsigned long)kp->addr - 1; + memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); + } + + return (unsigned long)buf; +} + +/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ +static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) +{ +#ifdef CONFIG_X86_64 + *addr++ = 0x48; + *addr++ = 0xbf; +#else + *addr++ = 0xb8; +#endif + *(unsigned long *)addr = val; +} + +static void __used __kprobes kprobes_optinsn_template_holder(void) +{ + asm volatile ( + ".global optprobe_template_entry\n" + "optprobe_template_entry:\n" +#ifdef CONFIG_X86_64 + /* We don't bother saving the ss register */ + " pushq %rsp\n" + " pushfq\n" + SAVE_REGS_STRING + " movq %rsp, %rsi\n" + ".global optprobe_template_val\n" + "optprobe_template_val:\n" + ASM_NOP5 + ASM_NOP5 + ".global optprobe_template_call\n" + "optprobe_template_call:\n" + ASM_NOP5 + /* Move flags to rsp */ + " movq 144(%rsp), %rdx\n" + " movq %rdx, 152(%rsp)\n" + RESTORE_REGS_STRING + /* Skip flags entry */ + " addq $8, %rsp\n" + " popfq\n" +#else /* CONFIG_X86_32 */ + " pushf\n" + SAVE_REGS_STRING + " movl %esp, %edx\n" + ".global optprobe_template_val\n" + "optprobe_template_val:\n" + ASM_NOP5 + ".global optprobe_template_call\n" + "optprobe_template_call:\n" + ASM_NOP5 + RESTORE_REGS_STRING + " addl $4, %esp\n" /* skip cs */ + " popf\n" +#endif + ".global optprobe_template_end\n" + "optprobe_template_end:\n"); +} + +#define TMPL_MOVE_IDX \ + ((long)&optprobe_template_val - (long)&optprobe_template_entry) +#define TMPL_CALL_IDX \ + ((long)&optprobe_template_call - (long)&optprobe_template_entry) +#define TMPL_END_IDX \ + ((long)&optprobe_template_end - (long)&optprobe_template_entry) + +#define INT3_SIZE sizeof(kprobe_opcode_t) + +/* Optimized kprobe call back function: called from optinsn */ +static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long flags; + + /* This is possible if op is under delayed unoptimizing */ + if (kprobe_disabled(&op->kp)) + return; + + local_irq_save(flags); + if (kprobe_running()) { + kprobes_inc_nmissed_count(&op->kp); + } else { + /* Save skipped registers */ +#ifdef CONFIG_X86_64 + regs->cs = __KERNEL_CS; +#else + 
regs->cs = __KERNEL_CS | get_kernel_rpl(); + regs->gs = 0; +#endif + regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; + regs->orig_ax = ~0UL; + + __this_cpu_write(current_kprobe, &op->kp); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + opt_pre_handler(&op->kp, regs); + __this_cpu_write(current_kprobe, NULL); + } + local_irq_restore(flags); +} + +static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) +{ + int len = 0, ret; + + while (len < RELATIVEJUMP_SIZE) { + ret = __copy_instruction(dest + len, src + len); + if (!ret || !can_boost(dest + len)) + return -EINVAL; + len += ret; + } + /* Check whether the address range is reserved */ + if (ftrace_text_reserved(src, src + len - 1) || + alternatives_text_reserved(src, src + len - 1) || + jump_label_text_reserved(src, src + len - 1)) + return -EBUSY; + + return len; +} + +/* Check whether insn is indirect jump */ +static int __kprobes insn_is_indirect_jump(struct insn *insn) +{ + return ((insn->opcode.bytes[0] == 0xff && + (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ + insn->opcode.bytes[0] == 0xea); /* Segment based jump */ +} + +/* Check whether insn jumps into specified address range */ +static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) +{ + unsigned long target = 0; + + switch (insn->opcode.bytes[0]) { + case 0xe0: /* loopne */ + case 0xe1: /* loope */ + case 0xe2: /* loop */ + case 0xe3: /* jcxz */ + case 0xe9: /* near relative jump */ + case 0xeb: /* short relative jump */ + break; + case 0x0f: + if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ + break; + return 0; + default: + if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ + break; + return 0; + } + target = (unsigned long)insn->next_byte + insn->immediate.value; + + return (start <= target && target <= start + len); +} + +/* Decode whole function to ensure any instructions don't jump into target */ +static int __kprobes can_optimize(unsigned long paddr) +{ + unsigned long addr, size = 0, offset = 0; + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; + + /* Lookup symbol including addr */ + if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) + return 0; + + /* + * Do not optimize in the entry code due to the unstable + * stack handling. + */ + if ((paddr >= (unsigned long)__entry_text_start) && + (paddr < (unsigned long)__entry_text_end)) + return 0; + + /* Check there is enough space for a relative jump. */ + if (size - offset < RELATIVEJUMP_SIZE) + return 0; + + /* Decode instructions */ + addr = paddr - offset; + while (addr < paddr - offset + size) { /* Decode until function end */ + if (search_exception_tables(addr)) + /* + * Since some fixup code will jumps into this function, + * we can't optimize kprobe in this function. + */ + return 0; + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); + insn_get_length(&insn); + /* Another subsystem puts a breakpoint */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; + /* Recover address */ + insn.kaddr = (void *)addr; + insn.next_byte = (void *)(addr + insn.length); + /* Check any instructions don't jump into target */ + if (insn_is_indirect_jump(&insn) || + insn_jump_into_range(&insn, paddr + INT3_SIZE, + RELATIVE_ADDR_SIZE)) + return 0; + addr += insn.length; + } + + return 1; +} + +/* Check optimized_kprobe can actually be optimized. 
*/ +int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) +{ + int i; + struct kprobe *p; + + for (i = 1; i < op->optinsn.size; i++) { + p = get_kprobe(op->kp.addr + i); + if (p && !kprobe_disabled(p)) + return -EEXIST; + } + + return 0; +} + +/* Check the addr is within the optimized instructions. */ +int __kprobes +arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) +{ + return ((unsigned long)op->kp.addr <= addr && + (unsigned long)op->kp.addr + op->optinsn.size > addr); +} + +/* Free optimized instruction slot */ +static __kprobes +void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) +{ + if (op->optinsn.insn) { + free_optinsn_slot(op->optinsn.insn, dirty); + op->optinsn.insn = NULL; + op->optinsn.size = 0; + } +} + +void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) +{ + __arch_remove_optimized_kprobe(op, 1); +} + +/* + * Copy replacing target instructions + * Target instructions MUST be relocatable (checked inside) + * This is called when new aggr(opt)probe is allocated or reused. + */ +int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) +{ + u8 *buf; + int ret; + long rel; + + if (!can_optimize((unsigned long)op->kp.addr)) + return -EILSEQ; + + op->optinsn.insn = get_optinsn_slot(); + if (!op->optinsn.insn) + return -ENOMEM; + + /* + * Verify if the address gap is in 2GB range, because this uses + * a relative jump. + */ + rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; + if (abs(rel) > 0x7fffffff) + return -ERANGE; + + buf = (u8 *)op->optinsn.insn; + + /* Copy instructions into the out-of-line buffer */ + ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); + if (ret < 0) { + __arch_remove_optimized_kprobe(op, 0); + return ret; + } + op->optinsn.size = ret; + + /* Copy arch-dep-instance from template */ + memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); + + /* Set probe information */ + synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); + + /* Set probe function call */ + synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); + + /* Set returning jmp instruction at the tail of out-of-line buffer */ + synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, + (u8 *)op->kp.addr + op->optinsn.size); + + flush_icache_range((unsigned long) buf, + (unsigned long) buf + TMPL_END_IDX + + op->optinsn.size + RELATIVEJUMP_SIZE); + return 0; +} + +#define MAX_OPTIMIZE_PROBES 256 +static struct text_poke_param *jump_poke_params; +static struct jump_poke_buffer { + u8 buf[RELATIVEJUMP_SIZE]; +} *jump_poke_bufs; + +static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, + u8 *insn_buf, + struct optimized_kprobe *op) +{ + s32 rel = (s32)((long)op->optinsn.insn - + ((long)op->kp.addr + RELATIVEJUMP_SIZE)); + + /* Backup instructions which will be replaced by jump address */ + memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, + RELATIVE_ADDR_SIZE); + + insn_buf[0] = RELATIVEJUMP_OPCODE; + *(s32 *)(&insn_buf[1]) = rel; + + tprm->addr = op->kp.addr; + tprm->opcode = insn_buf; + tprm->len = RELATIVEJUMP_SIZE; +} + +/* + * Replace breakpoints (int3) with relative jumps. + * Caller must call with locking kprobe_mutex and text_mutex. 
+ */ +void __kprobes arch_optimize_kprobes(struct list_head *oplist) +{ + struct optimized_kprobe *op, *tmp; + int c = 0; + + list_for_each_entry_safe(op, tmp, oplist, list) { + WARN_ON(kprobe_disabled(&op->kp)); + /* Setup param */ + setup_optimize_kprobe(&jump_poke_params[c], + jump_poke_bufs[c].buf, op); + list_del_init(&op->list); + if (++c >= MAX_OPTIMIZE_PROBES) + break; + } + + /* + * text_poke_smp doesn't support NMI/MCE code modifying. + * However, since kprobes itself also doesn't support NMI/MCE + * code probing, it's not a problem. + */ + text_poke_smp_batch(jump_poke_params, c); +} + +static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, + u8 *insn_buf, + struct optimized_kprobe *op) +{ + /* Set int3 to first byte for kprobes */ + insn_buf[0] = BREAKPOINT_INSTRUCTION; + memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + + tprm->addr = op->kp.addr; + tprm->opcode = insn_buf; + tprm->len = RELATIVEJUMP_SIZE; +} + +/* + * Recover original instructions and breakpoints from relative jumps. + * Caller must call with locking kprobe_mutex. + */ +extern void arch_unoptimize_kprobes(struct list_head *oplist, + struct list_head *done_list) +{ + struct optimized_kprobe *op, *tmp; + int c = 0; + + list_for_each_entry_safe(op, tmp, oplist, list) { + /* Setup param */ + setup_unoptimize_kprobe(&jump_poke_params[c], + jump_poke_bufs[c].buf, op); + list_move(&op->list, done_list); + if (++c >= MAX_OPTIMIZE_PROBES) + break; + } + + /* + * text_poke_smp doesn't support NMI/MCE code modifying. + * However, since kprobes itself also doesn't support NMI/MCE + * code probing, it's not a problem. + */ + text_poke_smp_batch(jump_poke_params, c); +} + +/* Replace a relative jump with a breakpoint (int3). */ +void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) +{ + u8 buf[RELATIVEJUMP_SIZE]; + + /* Set int3 to first byte for kprobes */ + buf[0] = BREAKPOINT_INSTRUCTION; + memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); +} + +int __kprobes +setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) +{ + struct optimized_kprobe *op; + + if (p->flags & KPROBE_FLAG_OPTIMIZED) { + /* This kprobe is really able to run optimized path. */ + op = container_of(p, struct optimized_kprobe, kp); + /* Detour through copied instructions */ + regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; + if (!reenter) + reset_current_kprobe(); + preempt_enable_no_resched(); + return 1; + } + return 0; +} + +int __kprobes arch_init_optprobes(void) +{ + /* Allocate code buffer and parameter array */ + jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * + MAX_OPTIMIZE_PROBES, GFP_KERNEL); + if (!jump_poke_bufs) + return -ENOMEM; + + jump_poke_params = kmalloc(sizeof(struct text_poke_param) * + MAX_OPTIMIZE_PROBES, GFP_KERNEL); + if (!jump_poke_params) { + kfree(jump_poke_bufs); + jump_poke_bufs = NULL; + return -ENOMEM; + } + + return 0; +} diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index ca6d450..e213fc8 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -30,16 +30,15 @@ * and Prasanna S Panchamukhi * added function-return probes. * 2005-May Rusty Lynch - * Added function return probes functionality + * Added function return probes functionality * 2006-Feb Masami Hiramatsu added - * kprobe-booster and kretprobe-booster for i386. + * kprobe-booster and kretprobe-booster for i386. 
* 2007-Dec Masami Hiramatsu added kprobe-booster - * and kretprobe-booster for x86-64 + * and kretprobe-booster for x86-64 * 2007-Dec Masami Hiramatsu , Arjan van de Ven - * and Jim Keniston - * unified x86 kprobes code. + * and Jim Keniston + * unified x86 kprobes code. */ - #include #include #include @@ -59,6 +58,8 @@ #include #include +#include "kprobes-common.h" + void jprobe_return_end(void); DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; @@ -108,6 +109,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { doesn't switch kernel stack.*/ {NULL, NULL} /* Terminator */ }; + const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) @@ -123,11 +125,17 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ -static void __kprobes synthesize_reljump(void *from, void *to) +void __kprobes synthesize_reljump(void *from, void *to) { __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); } +/* Insert a call instruction at address 'from', which calls address 'to'.*/ +void __kprobes synthesize_relcall(void *from, void *to) +{ + __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); +} + /* * Skip the prefixes of the instruction. */ @@ -151,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) * Returns non-zero if opcode is boostable. * RIP relative instructions are adjusted at copying time in 64 bits mode */ -static int __kprobes can_boost(kprobe_opcode_t *opcodes) +int __kprobes can_boost(kprobe_opcode_t *opcodes) { kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; @@ -207,8 +215,8 @@ retry: } } -static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, - unsigned long addr) +static unsigned long +__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct kprobe *kp; @@ -235,59 +243,12 @@ static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, return (unsigned long)buf; } -#ifdef CONFIG_OPTPROBES -static unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, - unsigned long addr) -{ - struct optimized_kprobe *op; - struct kprobe *kp; - long offs; - int i; - - for (i = 0; i < RELATIVEJUMP_SIZE; i++) { - kp = get_kprobe((void *)addr - i); - /* This function only handles jump-optimized kprobe */ - if (kp && kprobe_optimized(kp)) { - op = container_of(kp, struct optimized_kprobe, kp); - /* If op->list is not empty, op is under optimizing */ - if (list_empty(&op->list)) - goto found; - } - } - - return addr; -found: - /* - * If the kprobe can be optimized, original bytes which can be - * overwritten by jump destination address. In this case, original - * bytes must be recovered from op->optinsn.copied_insn buffer. - */ - memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - if (addr == (unsigned long)kp->addr) { - buf[0] = kp->opcode; - memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - } else { - offs = addr - (unsigned long)kp->addr - 1; - memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); - } - - return (unsigned long)buf; -} -#else -static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, - unsigned long addr) -{ - return addr; -} -#endif - /* * Recover the probed instruction at addr for further analysis. * Caller must lock kprobes by kprobe_mutex, or disable preemption * for preventing to release referencing kprobes. 
*/ -static unsigned long recover_probed_instruction(kprobe_opcode_t *buf, - unsigned long addr) +unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) { unsigned long __addr; @@ -361,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) * If not, return null. * Only applicable to 64-bit x86. */ -static int __kprobes __copy_instruction(u8 *dest, u8 *src) +int __kprobes __copy_instruction(u8 *dest, u8 *src) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; @@ -497,8 +458,8 @@ static void __kprobes restore_btf(void) } } -void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs) +void __kprobes +arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long *sara = stack_addr(regs); @@ -508,16 +469,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, *sara = (unsigned long) &kretprobe_trampoline; } -#ifdef CONFIG_OPTPROBES -static int __kprobes setup_detour_execution(struct kprobe *p, - struct pt_regs *regs, - int reenter); -#else -#define setup_detour_execution(p, regs, reenter) (0) -#endif - -static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb, int reenter) +static void __kprobes +setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) { if (setup_detour_execution(p, regs, reenter)) return; @@ -559,8 +512,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, * within the handler. We save the original kprobes variables and just single * step on the instruction of the new probe without calling any user handlers. */ -static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static int __kprobes +reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: @@ -655,69 +608,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) return 0; } -#ifdef CONFIG_X86_64 -#define SAVE_REGS_STRING \ - /* Skip cs, ip, orig_ax. */ \ - " subq $24, %rsp\n" \ - " pushq %rdi\n" \ - " pushq %rsi\n" \ - " pushq %rdx\n" \ - " pushq %rcx\n" \ - " pushq %rax\n" \ - " pushq %r8\n" \ - " pushq %r9\n" \ - " pushq %r10\n" \ - " pushq %r11\n" \ - " pushq %rbx\n" \ - " pushq %rbp\n" \ - " pushq %r12\n" \ - " pushq %r13\n" \ - " pushq %r14\n" \ - " pushq %r15\n" -#define RESTORE_REGS_STRING \ - " popq %r15\n" \ - " popq %r14\n" \ - " popq %r13\n" \ - " popq %r12\n" \ - " popq %rbp\n" \ - " popq %rbx\n" \ - " popq %r11\n" \ - " popq %r10\n" \ - " popq %r9\n" \ - " popq %r8\n" \ - " popq %rax\n" \ - " popq %rcx\n" \ - " popq %rdx\n" \ - " popq %rsi\n" \ - " popq %rdi\n" \ - /* Skip orig_ax, ip, cs */ \ - " addq $24, %rsp\n" -#else -#define SAVE_REGS_STRING \ - /* Skip cs, ip, orig_ax and gs. */ \ - " subl $16, %esp\n" \ - " pushl %fs\n" \ - " pushl %es\n" \ - " pushl %ds\n" \ - " pushl %eax\n" \ - " pushl %ebp\n" \ - " pushl %edi\n" \ - " pushl %esi\n" \ - " pushl %edx\n" \ - " pushl %ecx\n" \ - " pushl %ebx\n" -#define RESTORE_REGS_STRING \ - " popl %ebx\n" \ - " popl %ecx\n" \ - " popl %edx\n" \ - " popl %esi\n" \ - " popl %edi\n" \ - " popl %ebp\n" \ - " popl %eax\n" \ - /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ - " addl $24, %esp\n" -#endif - /* * When a retprobed function returns, this code saves registers and * calls trampoline_handler() runs, which calls the kretprobe's handler. 
@@ -871,8 +761,8 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) * jump instruction after the copied instruction, that jumps to the next * instruction after the probepoint. */ -static void __kprobes resume_execution(struct kprobe *p, - struct pt_regs *regs, struct kprobe_ctlblk *kcb) +static void __kprobes +resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = stack_addr(regs); unsigned long copy_ip = (unsigned long)p->ainsn.insn; @@ -1051,8 +941,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) /* * Wrapper routine for handling exceptions. */ -int __kprobes kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data) +int __kprobes +kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = data; int ret = NOTIFY_DONE; @@ -1162,462 +1052,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } - -#ifdef CONFIG_OPTPROBES - -/* Insert a call instruction at address 'from', which calls address 'to'.*/ -static void __kprobes synthesize_relcall(void *from, void *to) -{ - __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); -} - -/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ -static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, - unsigned long val) -{ -#ifdef CONFIG_X86_64 - *addr++ = 0x48; - *addr++ = 0xbf; -#else - *addr++ = 0xb8; -#endif - *(unsigned long *)addr = val; -} - -static void __used __kprobes kprobes_optinsn_template_holder(void) -{ - asm volatile ( - ".global optprobe_template_entry\n" - "optprobe_template_entry: \n" -#ifdef CONFIG_X86_64 - /* We don't bother saving the ss register */ - " pushq %rsp\n" - " pushfq\n" - SAVE_REGS_STRING - " movq %rsp, %rsi\n" - ".global optprobe_template_val\n" - "optprobe_template_val: \n" - ASM_NOP5 - ASM_NOP5 - ".global optprobe_template_call\n" - "optprobe_template_call: \n" - ASM_NOP5 - /* Move flags to rsp */ - " movq 144(%rsp), %rdx\n" - " movq %rdx, 152(%rsp)\n" - RESTORE_REGS_STRING - /* Skip flags entry */ - " addq $8, %rsp\n" - " popfq\n" -#else /* CONFIG_X86_32 */ - " pushf\n" - SAVE_REGS_STRING - " movl %esp, %edx\n" - ".global optprobe_template_val\n" - "optprobe_template_val: \n" - ASM_NOP5 - ".global optprobe_template_call\n" - "optprobe_template_call: \n" - ASM_NOP5 - RESTORE_REGS_STRING - " addl $4, %esp\n" /* skip cs */ - " popf\n" -#endif - ".global optprobe_template_end\n" - "optprobe_template_end: \n"); -} - -#define TMPL_MOVE_IDX \ - ((long)&optprobe_template_val - (long)&optprobe_template_entry) -#define TMPL_CALL_IDX \ - ((long)&optprobe_template_call - (long)&optprobe_template_entry) -#define TMPL_END_IDX \ - ((long)&optprobe_template_end - (long)&optprobe_template_entry) - -#define INT3_SIZE sizeof(kprobe_opcode_t) - -/* Optimized kprobe call back function: called from optinsn */ -static void __kprobes optimized_callback(struct optimized_kprobe *op, - struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long flags; - - /* This is possible if op is under delayed unoptimizing */ - if (kprobe_disabled(&op->kp)) - return; - - local_irq_save(flags); - if (kprobe_running()) { - kprobes_inc_nmissed_count(&op->kp); - } else { - /* Save skipped registers */ -#ifdef CONFIG_X86_64 - regs->cs = __KERNEL_CS; -#else - regs->cs = __KERNEL_CS | get_kernel_rpl(); - regs->gs = 0; -#endif - regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; - 
regs->orig_ax = ~0UL; - - __this_cpu_write(current_kprobe, &op->kp); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; - opt_pre_handler(&op->kp, regs); - __this_cpu_write(current_kprobe, NULL); - } - local_irq_restore(flags); -} - -static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) -{ - int len = 0, ret; - - while (len < RELATIVEJUMP_SIZE) { - ret = __copy_instruction(dest + len, src + len); - if (!ret || !can_boost(dest + len)) - return -EINVAL; - len += ret; - } - /* Check whether the address range is reserved */ - if (ftrace_text_reserved(src, src + len - 1) || - alternatives_text_reserved(src, src + len - 1) || - jump_label_text_reserved(src, src + len - 1)) - return -EBUSY; - - return len; -} - -/* Check whether insn is indirect jump */ -static int __kprobes insn_is_indirect_jump(struct insn *insn) -{ - return ((insn->opcode.bytes[0] == 0xff && - (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ - insn->opcode.bytes[0] == 0xea); /* Segment based jump */ -} - -/* Check whether insn jumps into specified address range */ -static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) -{ - unsigned long target = 0; - - switch (insn->opcode.bytes[0]) { - case 0xe0: /* loopne */ - case 0xe1: /* loope */ - case 0xe2: /* loop */ - case 0xe3: /* jcxz */ - case 0xe9: /* near relative jump */ - case 0xeb: /* short relative jump */ - break; - case 0x0f: - if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ - break; - return 0; - default: - if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ - break; - return 0; - } - target = (unsigned long)insn->next_byte + insn->immediate.value; - - return (start <= target && target <= start + len); -} - -/* Decode whole function to ensure any instructions don't jump into target */ -static int __kprobes can_optimize(unsigned long paddr) -{ - unsigned long addr, size = 0, offset = 0; - struct insn insn; - kprobe_opcode_t buf[MAX_INSN_SIZE]; - - /* Lookup symbol including addr */ - if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) - return 0; - - /* - * Do not optimize in the entry code due to the unstable - * stack handling. - */ - if ((paddr >= (unsigned long )__entry_text_start) && - (paddr < (unsigned long )__entry_text_end)) - return 0; - - /* Check there is enough space for a relative jump. */ - if (size - offset < RELATIVEJUMP_SIZE) - return 0; - - /* Decode instructions */ - addr = paddr - offset; - while (addr < paddr - offset + size) { /* Decode until function end */ - if (search_exception_tables(addr)) - /* - * Since some fixup code will jumps into this function, - * we can't optimize kprobe in this function. - */ - return 0; - kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); - insn_get_length(&insn); - /* Another subsystem puts a breakpoint */ - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) - return 0; - /* Recover address */ - insn.kaddr = (void *)addr; - insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_SIZE, - RELATIVE_ADDR_SIZE)) - return 0; - addr += insn.length; - } - - return 1; -} - -/* Check optimized_kprobe can actually be optimized. 
*/ -int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) -{ - int i; - struct kprobe *p; - - for (i = 1; i < op->optinsn.size; i++) { - p = get_kprobe(op->kp.addr + i); - if (p && !kprobe_disabled(p)) - return -EEXIST; - } - - return 0; -} - -/* Check the addr is within the optimized instructions. */ -int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op, - unsigned long addr) -{ - return ((unsigned long)op->kp.addr <= addr && - (unsigned long)op->kp.addr + op->optinsn.size > addr); -} - -/* Free optimized instruction slot */ -static __kprobes -void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) -{ - if (op->optinsn.insn) { - free_optinsn_slot(op->optinsn.insn, dirty); - op->optinsn.insn = NULL; - op->optinsn.size = 0; - } -} - -void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) -{ - __arch_remove_optimized_kprobe(op, 1); -} - -/* - * Copy replacing target instructions - * Target instructions MUST be relocatable (checked inside) - * This is called when new aggr(opt)probe is allocated or reused. - */ -int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) -{ - u8 *buf; - int ret; - long rel; - - if (!can_optimize((unsigned long)op->kp.addr)) - return -EILSEQ; - - op->optinsn.insn = get_optinsn_slot(); - if (!op->optinsn.insn) - return -ENOMEM; - - /* - * Verify if the address gap is in 2GB range, because this uses - * a relative jump. - */ - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; - if (abs(rel) > 0x7fffffff) - return -ERANGE; - - buf = (u8 *)op->optinsn.insn; - - /* Copy instructions into the out-of-line buffer */ - ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); - if (ret < 0) { - __arch_remove_optimized_kprobe(op, 0); - return ret; - } - op->optinsn.size = ret; - - /* Copy arch-dep-instance from template */ - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); - - /* Set probe information */ - synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); - - /* Set probe function call */ - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); - - /* Set returning jmp instruction at the tail of out-of-line buffer */ - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, - (u8 *)op->kp.addr + op->optinsn.size); - - flush_icache_range((unsigned long) buf, - (unsigned long) buf + TMPL_END_IDX + - op->optinsn.size + RELATIVEJUMP_SIZE); - return 0; -} - -#define MAX_OPTIMIZE_PROBES 256 -static struct text_poke_param *jump_poke_params; -static struct jump_poke_buffer { - u8 buf[RELATIVEJUMP_SIZE]; -} *jump_poke_bufs; - -static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, - u8 *insn_buf, - struct optimized_kprobe *op) -{ - s32 rel = (s32)((long)op->optinsn.insn - - ((long)op->kp.addr + RELATIVEJUMP_SIZE)); - - /* Backup instructions which will be replaced by jump address */ - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, - RELATIVE_ADDR_SIZE); - - insn_buf[0] = RELATIVEJUMP_OPCODE; - *(s32 *)(&insn_buf[1]) = rel; - - tprm->addr = op->kp.addr; - tprm->opcode = insn_buf; - tprm->len = RELATIVEJUMP_SIZE; -} - -/* - * Replace breakpoints (int3) with relative jumps. - * Caller must call with locking kprobe_mutex and text_mutex. 
- */ -void __kprobes arch_optimize_kprobes(struct list_head *oplist) -{ - struct optimized_kprobe *op, *tmp; - int c = 0; - - list_for_each_entry_safe(op, tmp, oplist, list) { - WARN_ON(kprobe_disabled(&op->kp)); - /* Setup param */ - setup_optimize_kprobe(&jump_poke_params[c], - jump_poke_bufs[c].buf, op); - list_del_init(&op->list); - if (++c >= MAX_OPTIMIZE_PROBES) - break; - } - - /* - * text_poke_smp doesn't support NMI/MCE code modifying. - * However, since kprobes itself also doesn't support NMI/MCE - * code probing, it's not a problem. - */ - text_poke_smp_batch(jump_poke_params, c); -} - -static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, - u8 *insn_buf, - struct optimized_kprobe *op) -{ - /* Set int3 to first byte for kprobes */ - insn_buf[0] = BREAKPOINT_INSTRUCTION; - memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - - tprm->addr = op->kp.addr; - tprm->opcode = insn_buf; - tprm->len = RELATIVEJUMP_SIZE; -} - -/* - * Recover original instructions and breakpoints from relative jumps. - * Caller must call with locking kprobe_mutex. - */ -extern void arch_unoptimize_kprobes(struct list_head *oplist, - struct list_head *done_list) -{ - struct optimized_kprobe *op, *tmp; - int c = 0; - - list_for_each_entry_safe(op, tmp, oplist, list) { - /* Setup param */ - setup_unoptimize_kprobe(&jump_poke_params[c], - jump_poke_bufs[c].buf, op); - list_move(&op->list, done_list); - if (++c >= MAX_OPTIMIZE_PROBES) - break; - } - - /* - * text_poke_smp doesn't support NMI/MCE code modifying. - * However, since kprobes itself also doesn't support NMI/MCE - * code probing, it's not a problem. - */ - text_poke_smp_batch(jump_poke_params, c); -} - -/* Replace a relative jump with a breakpoint (int3). */ -void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) -{ - u8 buf[RELATIVEJUMP_SIZE]; - - /* Set int3 to first byte for kprobes */ - buf[0] = BREAKPOINT_INSTRUCTION; - memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); -} - -static int __kprobes setup_detour_execution(struct kprobe *p, - struct pt_regs *regs, - int reenter) -{ - struct optimized_kprobe *op; - - if (p->flags & KPROBE_FLAG_OPTIMIZED) { - /* This kprobe is really able to run optimized path. 
*/ - op = container_of(p, struct optimized_kprobe, kp); - /* Detour through copied instructions */ - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; - if (!reenter) - reset_current_kprobe(); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - -static int __kprobes init_poke_params(void) -{ - /* Allocate code buffer and parameter array */ - jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * - MAX_OPTIMIZE_PROBES, GFP_KERNEL); - if (!jump_poke_bufs) - return -ENOMEM; - - jump_poke_params = kmalloc(sizeof(struct text_poke_param) * - MAX_OPTIMIZE_PROBES, GFP_KERNEL); - if (!jump_poke_params) { - kfree(jump_poke_bufs); - jump_poke_bufs = NULL; - return -ENOMEM; - } - - return 0; -} -#else /* !CONFIG_OPTPROBES */ -static int __kprobes init_poke_params(void) -{ - return 0; -} -#endif - int __init arch_init_kprobes(void) { - return init_poke_params(); + return arch_init_optprobes(); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) -- cgit v0.10.2 From b5387528f31d98acedf06e930554b563d87e2383 Mon Sep 17 00:00:00 2001 From: Roberto Agostino Vitillo Date: Thu, 9 Feb 2012 23:21:01 +0100 Subject: perf tools: Add code to support PERF_SAMPLE_BRANCH_STACK This patch adds: - ability to parse samples with PERF_SAMPLE_BRANCH_STACK - sort on branches (dso_from, symbol_from, dso_to, symbol_to, mispredict) - build histograms on branches Signed-off-by: Roberto Agostino Vitillo Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: robert.richter@amd.com Cc: ming.m.lin@intel.com Cc: andi@firstfloor.org Cc: asharma@fb.com Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1328826068-11713-12-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/perf.h b/tools/perf/perf.h index f0227e9..358f401 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -179,6 +179,23 @@ struct ip_callchain { u64 ips[0]; }; +struct branch_flags { + u64 mispred:1; + u64 predicted:1; + u64 reserved:62; +}; + +struct branch_entry { + u64 from; + u64 to; + struct branch_flags flags; +}; + +struct branch_stack { + u64 nr; + struct branch_entry entries[0]; +}; + extern bool perf_host, perf_guest; extern const char perf_version_string[]; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index cbdeaad..1b19728 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -81,6 +81,7 @@ struct perf_sample { u32 raw_size; void *raw_data; struct ip_callchain *callchain; + struct branch_stack *branch_stack; }; #define BUILD_ID_SIZE 20 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 302d49a..a1fd1cd 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -576,6 +576,16 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, data->raw_data = (void *) pdata; } + if (type & PERF_SAMPLE_BRANCH_STACK) { + u64 sz; + + data->branch_stack = (struct branch_stack *)array; + array++; /* nr */ + + sz = data->branch_stack->nr * sizeof(struct branch_entry); + sz /= sizeof(u64); + array += sz; + } return 0; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 6f505d1..8380c3d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -50,21 +50,25 @@ static void hists__reset_col_len(struct hists *hists) hists__set_col_len(hists, col, 0); } +static void hists__set_unres_dso_col_len(struct hists *hists, int dso) +{ + const unsigned int unresolved_col_width = BITS_PER_LONG / 4; + + if (hists__col_len(hists, dso) < 
unresolved_col_width && + !symbol_conf.col_width_list_str && !symbol_conf.field_sep && + !symbol_conf.dso_list) + hists__set_col_len(hists, dso, unresolved_col_width); +} + static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) { + const unsigned int unresolved_col_width = BITS_PER_LONG / 4; u16 len; if (h->ms.sym) - hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen); - else { - const unsigned int unresolved_col_width = BITS_PER_LONG / 4; - - if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width && - !symbol_conf.col_width_list_str && !symbol_conf.field_sep && - !symbol_conf.dso_list) - hists__set_col_len(hists, HISTC_DSO, - unresolved_col_width); - } + hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); + else + hists__set_unres_dso_col_len(hists, HISTC_DSO); len = thread__comm_len(h->thread); if (hists__new_col_len(hists, HISTC_COMM, len)) @@ -74,6 +78,37 @@ static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) len = dso__name_len(h->ms.map->dso); hists__new_col_len(hists, HISTC_DSO, len); } + + if (h->branch_info) { + int symlen; + /* + * +4 accounts for '[x] ' priv level info + * +2 account of 0x prefix on raw addresses + */ + if (h->branch_info->from.sym) { + symlen = (int)h->branch_info->from.sym->namelen + 4; + hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); + + symlen = dso__name_len(h->branch_info->from.map->dso); + hists__new_col_len(hists, HISTC_DSO_FROM, symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); + hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); + } + + if (h->branch_info->to.sym) { + symlen = (int)h->branch_info->to.sym->namelen + 4; + hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); + + symlen = dso__name_len(h->branch_info->to.map->dso); + hists__new_col_len(hists, HISTC_DSO_TO, symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); + hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); + } + } } static void hist_entry__add_cpumode_period(struct hist_entry *he, @@ -195,26 +230,14 @@ static u8 symbol__parent_filter(const struct symbol *parent) return 0; } -struct hist_entry *__hists__add_entry(struct hists *hists, +static struct hist_entry *add_hist_entry(struct hists *hists, + struct hist_entry *entry, struct addr_location *al, - struct symbol *sym_parent, u64 period) + u64 period) { struct rb_node **p; struct rb_node *parent = NULL; struct hist_entry *he; - struct hist_entry entry = { - .thread = al->thread, - .ms = { - .map = al->map, - .sym = al->sym, - }, - .cpu = al->cpu, - .ip = al->addr, - .level = al->level, - .period = period, - .parent = sym_parent, - .filtered = symbol__parent_filter(sym_parent), - }; int cmp; pthread_mutex_lock(&hists->lock); @@ -225,7 +248,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists, parent = *p; he = rb_entry(parent, struct hist_entry, rb_node_in); - cmp = hist_entry__cmp(&entry, he); + cmp = hist_entry__cmp(entry, he); if (!cmp) { he->period += period; @@ -239,7 +262,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists, p = &(*p)->rb_right; } - he = hist_entry__new(&entry); + he = hist_entry__new(entry); if (!he) goto out_unlock; @@ -252,6 +275,51 @@ out_unlock: return he; } +struct hist_entry *__hists__add_branch_entry(struct hists *self, + struct addr_location *al, + struct symbol *sym_parent, + struct branch_info *bi, + u64 period) +{ + struct hist_entry entry = { + .thread = al->thread, + .ms = { + 
.map = bi->to.map, + .sym = bi->to.sym, + }, + .cpu = al->cpu, + .ip = bi->to.addr, + .level = al->level, + .period = period, + .parent = sym_parent, + .filtered = symbol__parent_filter(sym_parent), + .branch_info = bi, + }; + + return add_hist_entry(self, &entry, al, period); +} + +struct hist_entry *__hists__add_entry(struct hists *self, + struct addr_location *al, + struct symbol *sym_parent, u64 period) +{ + struct hist_entry entry = { + .thread = al->thread, + .ms = { + .map = al->map, + .sym = al->sym, + }, + .cpu = al->cpu, + .ip = al->addr, + .level = al->level, + .period = period, + .parent = sym_parent, + .filtered = symbol__parent_filter(sym_parent), + }; + + return add_hist_entry(self, &entry, al, period); +} + int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 48e5acd..9413f3e 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -42,6 +42,11 @@ enum hist_column { HISTC_COMM, HISTC_PARENT, HISTC_CPU, + HISTC_MISPREDICT, + HISTC_SYMBOL_FROM, + HISTC_SYMBOL_TO, + HISTC_DSO_FROM, + HISTC_DSO_TO, HISTC_NR_COLS, /* Last entry */ }; @@ -74,6 +79,12 @@ int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size, struct hists *hists); void hist_entry__free(struct hist_entry *); +struct hist_entry *__hists__add_branch_entry(struct hists *self, + struct addr_location *al, + struct symbol *sym_parent, + struct branch_info *bi, + u64 period); + void hists__output_resort(struct hists *self); void hists__output_resort_threaded(struct hists *hists); void hists__collapse_resort(struct hists *self); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 9f833cf..bec8a32 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -229,6 +229,63 @@ static bool symbol__match_parent_regex(struct symbol *sym) return 0; } +static const u8 cpumodes[] = { + PERF_RECORD_MISC_USER, + PERF_RECORD_MISC_KERNEL, + PERF_RECORD_MISC_GUEST_USER, + PERF_RECORD_MISC_GUEST_KERNEL +}; +#define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) + +static void ip__resolve_ams(struct machine *self, struct thread *thread, + struct addr_map_symbol *ams, + u64 ip) +{ + struct addr_location al; + size_t i; + u8 m; + + memset(&al, 0, sizeof(al)); + + for (i = 0; i < NCPUMODES; i++) { + m = cpumodes[i]; + /* + * We cannot use the header.misc hint to determine whether a + * branch stack address is user, kernel, guest, hypervisor. + * Branches may straddle the kernel/user/hypervisor boundaries. 
+ * Thus, we have to try consecutively until we find a match + * or else, the symbol is unknown + */ + thread__find_addr_location(thread, self, m, MAP__FUNCTION, + ip, &al, NULL); + if (al.sym) + goto found; + } +found: + ams->addr = ip; + ams->sym = al.sym; + ams->map = al.map; +} + +struct branch_info *machine__resolve_bstack(struct machine *self, + struct thread *thr, + struct branch_stack *bs) +{ + struct branch_info *bi; + unsigned int i; + + bi = calloc(bs->nr, sizeof(struct branch_info)); + if (!bi) + return NULL; + + for (i = 0; i < bs->nr; i++) { + ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to); + ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from); + bi[i].flags = bs->entries[i].flags; + } + return bi; +} + int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, struct thread *thread, struct ip_callchain *chain, @@ -697,6 +754,18 @@ static void callchain__printf(struct perf_sample *sample) i, sample->callchain->ips[i]); } +static void branch_stack__printf(struct perf_sample *sample) +{ + uint64_t i; + + printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); + + for (i = 0; i < sample->branch_stack->nr; i++) + printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", + i, sample->branch_stack->entries[i].from, + sample->branch_stack->entries[i].to); +} + static void perf_session__print_tstamp(struct perf_session *session, union perf_event *event, struct perf_sample *sample) @@ -744,6 +813,9 @@ static void dump_sample(struct perf_session *session, union perf_event *event, if (session->sample_type & PERF_SAMPLE_CALLCHAIN) callchain__printf(sample); + + if (session->sample_type & PERF_SAMPLE_BRANCH_STACK) + branch_stack__printf(sample); } static struct machine * diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index c8d9017..7a5434c 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -73,6 +73,10 @@ int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel struct ip_callchain *chain, struct symbol **parent); +struct branch_info *machine__resolve_bstack(struct machine *self, + struct thread *thread, + struct branch_stack *bs); + bool perf_session__has_traces(struct perf_session *self, const char *msg); void mem_bswap_64(void *src, int byte_size); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 16da30d..2739ed1 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -8,6 +8,7 @@ const char default_sort_order[] = "comm,dso,symbol"; const char *sort_order = default_sort_order; int sort__need_collapse = 0; int sort__has_parent = 0; +bool sort__branch_mode; enum sort_type sort__first_dimension; @@ -94,6 +95,26 @@ static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); } +static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) +{ + struct dso *dso_l = map_l ? map_l->dso : NULL; + struct dso *dso_r = map_r ? 
map_r->dso : NULL; + const char *dso_name_l, *dso_name_r; + + if (!dso_l || !dso_r) + return cmp_null(dso_l, dso_r); + + if (verbose) { + dso_name_l = dso_l->long_name; + dso_name_r = dso_r->long_name; + } else { + dso_name_l = dso_l->short_name; + dso_name_r = dso_r->short_name; + } + + return strcmp(dso_name_l, dso_name_r); +} + struct sort_entry sort_comm = { .se_header = "Command", .se_cmp = sort__comm_cmp, @@ -107,36 +128,74 @@ struct sort_entry sort_comm = { static int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { - struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL; - struct dso *dso_r = right->ms.map ? right->ms.map->dso : NULL; - const char *dso_name_l, *dso_name_r; + return _sort__dso_cmp(left->ms.map, right->ms.map); +} - if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); - if (verbose) { - dso_name_l = dso_l->long_name; - dso_name_r = dso_r->long_name; - } else { - dso_name_l = dso_l->short_name; - dso_name_r = dso_r->short_name; +static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, + u64 ip_l, u64 ip_r) +{ + if (!sym_l || !sym_r) + return cmp_null(sym_l, sym_r); + + if (sym_l == sym_r) + return 0; + + if (sym_l) + ip_l = sym_l->start; + if (sym_r) + ip_r = sym_r->start; + + return (int64_t)(ip_r - ip_l); +} + +static int _hist_entry__dso_snprintf(struct map *map, char *bf, + size_t size, unsigned int width) +{ + if (map && map->dso) { + const char *dso_name = !verbose ? map->dso->short_name : + map->dso->long_name; + return repsep_snprintf(bf, size, "%-*s", width, dso_name); } - return strcmp(dso_name_l, dso_name_r); + return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); } static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { - if (self->ms.map && self->ms.map->dso) { - const char *dso_name = !verbose ? self->ms.map->dso->short_name : - self->ms.map->dso->long_name; - return repsep_snprintf(bf, size, "%-*s", width, dso_name); + return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); +} + +static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, + u64 ip, char level, char *bf, size_t size, + unsigned int width __used) +{ + size_t ret = 0; + + if (verbose) { + char o = map ? 
dso__symtab_origin(map->dso) : '!'; + ret += repsep_snprintf(bf, size, "%-#*llx %c ", + BITS_PER_LONG / 4, ip, o); } - return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); + ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); + if (sym) + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, + sym->name); + else { + size_t len = BITS_PER_LONG / 4; + ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", + len, ip); + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, ""); + } + + return ret; } + struct sort_entry sort_dso = { .se_header = "Shared Object", .se_cmp = sort__dso_cmp, @@ -144,8 +203,14 @@ struct sort_entry sort_dso = { .se_width_idx = HISTC_DSO, }; -/* --sort symbol */ +static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width __used) +{ + return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, + self->level, bf, size, width); +} +/* --sort symbol */ static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -163,31 +228,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) ip_l = left->ms.sym->start; ip_r = right->ms.sym->start; - return (int64_t)(ip_r - ip_l); -} - -static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) -{ - size_t ret = 0; - - if (verbose) { - char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!'; - ret += repsep_snprintf(bf, size, "%-#*llx %c ", - BITS_PER_LONG / 4, self->ip, o); - } - - if (!sort_dso.elide) - ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level); - - if (self->ms.sym) - ret += repsep_snprintf(bf + ret, size - ret, "%s", - self->ms.sym->name); - else - ret += repsep_snprintf(bf + ret, size - ret, "%-#*llx", - BITS_PER_LONG / 4, self->ip); - - return ret; + return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); } struct sort_entry sort_sym = { @@ -246,19 +287,155 @@ struct sort_entry sort_cpu = { .se_width_idx = HISTC_CPU, }; +static int64_t +sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return _sort__dso_cmp(left->branch_info->from.map, + right->branch_info->from.map); +} + +static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width) +{ + return _hist_entry__dso_snprintf(self->branch_info->from.map, + bf, size, width); +} + +struct sort_entry sort_dso_from = { + .se_header = "Source Shared Object", + .se_cmp = sort__dso_from_cmp, + .se_snprintf = hist_entry__dso_from_snprintf, + .se_width_idx = HISTC_DSO_FROM, +}; + +static int64_t +sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return _sort__dso_cmp(left->branch_info->to.map, + right->branch_info->to.map); +} + +static int hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width) +{ + return _hist_entry__dso_snprintf(self->branch_info->to.map, + bf, size, width); +} + +static int64_t +sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct addr_map_symbol *from_l = &left->branch_info->from; + struct addr_map_symbol *from_r = &right->branch_info->from; + + if (!from_l->sym && !from_r->sym) + return right->level - left->level; + + return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr, + from_r->addr); +} + +static int64_t +sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct addr_map_symbol *to_l = &left->branch_info->to; + struct 
addr_map_symbol *to_r = &right->branch_info->to; + + if (!to_l->sym && !to_r->sym) + return right->level - left->level; + + return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr); +} + +static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width __used) +{ + struct addr_map_symbol *from = &self->branch_info->from; + return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, + self->level, bf, size, width); + +} + +static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width __used) +{ + struct addr_map_symbol *to = &self->branch_info->to; + return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, + self->level, bf, size, width); + +} + +struct sort_entry sort_dso_to = { + .se_header = "Target Shared Object", + .se_cmp = sort__dso_to_cmp, + .se_snprintf = hist_entry__dso_to_snprintf, + .se_width_idx = HISTC_DSO_TO, +}; + +struct sort_entry sort_sym_from = { + .se_header = "Source Symbol", + .se_cmp = sort__sym_from_cmp, + .se_snprintf = hist_entry__sym_from_snprintf, + .se_width_idx = HISTC_SYMBOL_FROM, +}; + +struct sort_entry sort_sym_to = { + .se_header = "Target Symbol", + .se_cmp = sort__sym_to_cmp, + .se_snprintf = hist_entry__sym_to_snprintf, + .se_width_idx = HISTC_SYMBOL_TO, +}; + +static int64_t +sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) +{ + const unsigned char mp = left->branch_info->flags.mispred != + right->branch_info->flags.mispred; + const unsigned char p = left->branch_info->flags.predicted != + right->branch_info->flags.predicted; + + return mp || p; +} + +static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width){ + static const char *out = "N/A"; + + if (self->branch_info->flags.predicted) + out = "N"; + else if (self->branch_info->flags.mispred) + out = "Y"; + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +struct sort_entry sort_mispredict = { + .se_header = "Branch Mispredicted", + .se_cmp = sort__mispredict_cmp, + .se_snprintf = hist_entry__mispredict_snprintf, + .se_width_idx = HISTC_MISPREDICT, +}; + struct sort_dimension { const char *name; struct sort_entry *entry; int taken; }; +#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } + static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, - { .name = "parent", .entry = &sort_parent, }, - { .name = "cpu", .entry = &sort_cpu, }, + DIM(SORT_PID, "pid", sort_thread), + DIM(SORT_COMM, "comm", sort_comm), + DIM(SORT_DSO, "dso", sort_dso), + DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), + DIM(SORT_DSO_TO, "dso_to", sort_dso_to), + DIM(SORT_SYM, "symbol", sort_sym), + DIM(SORT_SYM_TO, "symbol_from", sort_sym_from), + DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to), + DIM(SORT_PARENT, "parent", sort_parent), + DIM(SORT_CPU, "cpu", sort_cpu), + DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), }; int sort_dimension__add(const char *tok) @@ -270,7 +447,6 @@ int sort_dimension__add(const char *tok) if (strncasecmp(tok, sd->name, strlen(tok))) continue; - if (sd->entry == &sort_parent) { int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); if (ret) { @@ -302,6 +478,16 @@ int sort_dimension__add(const char *tok) sort__first_dimension = SORT_PARENT; else if (!strcmp(sd->name, "cpu")) sort__first_dimension = SORT_CPU; + 
else if (!strcmp(sd->name, "symbol_from")) + sort__first_dimension = SORT_SYM_FROM; + else if (!strcmp(sd->name, "symbol_to")) + sort__first_dimension = SORT_SYM_TO; + else if (!strcmp(sd->name, "dso_from")) + sort__first_dimension = SORT_DSO_FROM; + else if (!strcmp(sd->name, "dso_to")) + sort__first_dimension = SORT_DSO_TO; + else if (!strcmp(sd->name, "mispredict")) + sort__first_dimension = SORT_MISPREDICT; } list_add_tail(&sd->entry->list, &hist_entry__sort_list); @@ -309,7 +495,6 @@ int sort_dimension__add(const char *tok) return 0; } - return -ESRCH; } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 3f67ae3..7aa72a0 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -31,11 +31,14 @@ extern const char *parent_pattern; extern const char default_sort_order[]; extern int sort__need_collapse; extern int sort__has_parent; +extern bool sort__branch_mode; extern char *field_sep; extern struct sort_entry sort_comm; extern struct sort_entry sort_dso; extern struct sort_entry sort_sym; extern struct sort_entry sort_parent; +extern struct sort_entry sort_lbr_dso; +extern struct sort_entry sort_lbr_sym; extern enum sort_type sort__first_dimension; /** @@ -72,6 +75,7 @@ struct hist_entry { struct hist_entry *pair; struct rb_root sorted_chain; }; + struct branch_info *branch_info; struct callchain_root callchain[0]; }; @@ -82,6 +86,11 @@ enum sort_type { SORT_SYM, SORT_PARENT, SORT_CPU, + SORT_DSO_FROM, + SORT_DSO_TO, + SORT_SYM_FROM, + SORT_SYM_TO, + SORT_MISPREDICT, }; /* diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 2a683d4..5866ce6 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -5,6 +5,7 @@ #include #include #include "map.h" +#include "../perf.h" #include #include #include @@ -120,6 +121,18 @@ struct map_symbol { bool has_children; }; +struct addr_map_symbol { + struct map *map; + struct symbol *sym; + u64 addr; +}; + +struct branch_info { + struct addr_map_symbol from; + struct addr_map_symbol to; + struct branch_flags flags; +}; + struct addr_location { struct thread *thread; struct map *map; -- cgit v0.10.2 From bdfebd848f2a14e639031a0b0e61d7c7ee5e5fd2 Mon Sep 17 00:00:00 2001 From: Roberto Agostino Vitillo Date: Thu, 9 Feb 2012 23:21:02 +0100 Subject: perf record: Add support for sampling taken branch This patch adds a new option to enable taken branch stack sampling, i.e., leverage the PERF_SAMPLE_BRANCH_STACK feature of perf_events. There is a new option to active this mode: -b. It is possible to pass a set of filters to select the type of branches to sample. 
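Each of these filters maps one-for-one onto a PERF_SAMPLE_BRANCH_* bit in the
new branch_sample_type field of struct perf_event_attr (see the evsel.c hunk
further below). As a rough illustration only (not part of this patch, and
assuming the PERF_SAMPLE_BRANCH_* ABI bits introduced earlier in this series
are present in the installed perf_event.h; open_branch_sampling_event is an
illustrative name, not code from this series), the raw perf_event_open()
equivalent of "perf record -b any_call,u -e cycles:u" is something like:

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static int open_branch_sampling_event(pid_t pid)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.sample_period = 100000;
          attr.exclude_kernel = 1;        /* the :u event modifier */
          attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
          /* -b any_call,u: user level function calls only */
          attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY_CALL |
                                    PERF_SAMPLE_BRANCH_USER;

          return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
  }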
The following filters are available:

- any : any type of branches
- any_call : any function call or system call
- any_ret : any function return or system call return
- any_ind : any indirect branch
- u: only when the branch target is at the user level
- k: only when the branch target is in the kernel
- hv: only when the branch target is in the hypervisor

Filters can be combined by passing a comma separated list to the option:

$ perf record -b any_call,u -e cycles:u branchy

Signed-off-by: Roberto Agostino Vitillo
Signed-off-by: Stephane Eranian
Cc: peterz@infradead.org
Cc: acme@redhat.com
Cc: robert.richter@amd.com
Cc: ming.m.lin@intel.com
Cc: andi@firstfloor.org
Cc: asharma@fb.com
Cc: vweaver1@eecs.utk.edu
Cc: khandual@linux.vnet.ibm.com
Cc: dsahern@gmail.com
Link: http://lkml.kernel.org/r/1328826068-11713-13-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar

diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index a5766b4..60bddaf 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -152,6 +152,31 @@ an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must ha
 corresponding events, i.e., they always refer to events defined earlier on the command
 line.
+-b::
+--branch-stack::
+Enable taken branch stack sampling. Each sample captures a series of consecutive
+taken branches. The number of branches captured with each sample depends on the
+underlying hardware, the type of branches of interest, and the executed code.
+It is possible to select the types of branches captured by enabling filters. The
+following filters are defined:
+
+        - any : any type of branches
+        - any_call: any function call or system call
+        - any_ret: any function return or system call return
+        - any_ind: any indirect branch
+        - u: only when the branch target is at the user level
+        - k: only when the branch target is in the kernel
+        - hv: only when the target is at the hypervisor level
+
++
+At least one of any, any_call, any_ret, any_ind must be provided. The privilege levels may
+be omitted, in which case, the privilege levels of the associated event are applied to the
+branch filter. Both kernel (k) and hypervisor (hv) privilege levels are subject to
+permissions. When sampling on multiple events, branch stack sampling is enabled for all
+the sampling events. The sampled branch type is the same for all events.
+Note that taken branch sampling may not be available on all processors.
+The various filters must be specified as a comma separated list: -b any_ret,u,k + SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-list[1] diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 75d230f..1c49d4e 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -638,6 +638,77 @@ out_delete_session: return err; } +#define BRANCH_OPT(n, m) \ + { .name = n, .mode = (m) } + +#define BRANCH_END { .name = NULL } + +struct branch_mode { + const char *name; + int mode; +}; + +static const struct branch_mode branch_modes[] = { + BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER), + BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL), + BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV), + BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY), + BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL), + BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN), + BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL), + BRANCH_END +}; + +static int +parse_branch_stack(const struct option *opt, const char *str, int unset __used) +{ +#define ONLY_PLM \ + (PERF_SAMPLE_BRANCH_USER |\ + PERF_SAMPLE_BRANCH_KERNEL |\ + PERF_SAMPLE_BRANCH_HV) + + uint64_t *mode = (uint64_t *)opt->value; + const struct branch_mode *br; + char *s, *os, *p; + int ret = -1; + + *mode = 0; + + /* because str is read-only */ + s = os = strdup(str); + if (!s) + return -1; + + for (;;) { + p = strchr(s, ','); + if (p) + *p = '\0'; + + for (br = branch_modes; br->name; br++) { + if (!strcasecmp(s, br->name)) + break; + } + if (!br->name) + goto error; + + *mode |= br->mode; + + if (!p) + break; + + s = p + 1; + } + ret = 0; + + if ((*mode & ~ONLY_PLM) == 0) { + error("need at least one branch type with -b\n"); + ret = -1; + } +error: + free(os); + return ret; +} + static const char * const record_usage[] = { "perf record [] []", "perf record [] -- []", @@ -727,6 +798,9 @@ const struct option record_options[] = { "monitor event in cgroup name only", parse_cgroups), OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"), + OPT_CALLBACK('b', "branch-stack", &record.opts.branch_stack, + "branch mode mask", "branch stack sampling modes", + parse_branch_stack), OPT_END() }; diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 358f401..eec392e 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -222,6 +222,7 @@ struct perf_record_opts { unsigned int freq; unsigned int mmap_pages; unsigned int user_freq; + int branch_stack; u64 default_interval; u64 user_interval; const char *cpu_list; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a1fd1cd..f421f7c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -126,6 +126,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) attr->watermark = 0; attr->wakeup_events = 1; } + if (opts->branch_stack) { + attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; + attr->branch_sample_type = opts->branch_stack; + } attr->mmap = track; attr->comm = track; -- cgit v0.10.2 From b50311dc2ac1c04ad19163c2359910b25e16caf6 Mon Sep 17 00:00:00 2001 From: Roberto Agostino Vitillo Date: Thu, 9 Feb 2012 23:21:03 +0100 Subject: perf report: Add support for taken branch sampling This patch adds support for taken branch sampling, i.e, the PERF_SAMPLE_BRANCH_STACK feature to perf report. In other words, to display histograms based on taken branches rather than executed instructions addresses. The new option is called -b and it takes no argument. 
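Each sample then carries the branch_stack layout added to tools/perf/perf.h
earlier in this series: a u64 count followed by from/to/flags triples. Purely
as a sketch (assuming those structure definitions are in scope;
dump_branch_stack is an illustrative name, not code from this series), the
walk that branch_stack__printf() performs in util/session.c boils down to:

  #include <inttypes.h>
  #include <stdio.h>

  /* assumes struct branch_stack/branch_entry from tools/perf/perf.h */
  static void dump_branch_stack(const struct branch_stack *bs)
  {
          uint64_t i;

          /* one from -> to pair per captured taken branch */
          for (i = 0; i < bs->nr; i++)
                  printf("%016" PRIx64 " -> %016" PRIx64 "%s\n",
                         bs->entries[i].from, bs->entries[i].to,
                         bs->entries[i].flags.mispred ? " (mispredicted)" : "");
  }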
To generate meaningful output, the perf.data file must have been obtained using perf record -b xxx ... where xxx is a branch filter option. The output shows symbols and modules, sorted by 'who branches where' most often. The percentages reported in the first column refer to the total number of branches captured and not the usual number of samples. Here is a quick example, where branchy is a simple test program which looks as follows: void f2(void) {} void f3(void) {} void f1(unsigned long n) { if (n & 1UL) f2(); else f3(); } int main(void) { unsigned long i; for (i=0; i < N; i++) f1(i); return 0; } Here is the output captured on Nehalem, when we are only interested in user-level function calls. $ perf record -b any_call,u -e cycles:u branchy $ perf report -b --sort=symbol 52.34% [.] main [.] f1 24.04% [.] f1 [.] f3 23.60% [.] f1 [.] f2 0.01% [k] _IO_new_file_xsputn [k] _IO_file_overflow 0.01% [k] _IO_vfprintf_internal [k] _IO_new_file_xsputn 0.01% [k] _IO_vfprintf_internal [k] strchrnul 0.01% [k] __printf [k] _IO_vfprintf_internal 0.01% [k] main [k] __printf About half (52%) of the call branches captured are from main() -> f1(). The second half (24%+23%) is split in two equal shares between f1() -> f2() and f1() -> f3(). The output is as expected given the code. It should be noted that using -b in perf record does not eliminate information in the perf.data file. Consequently, a typical profile can also be obtained by perf report by simply not using its -b option. It is possible to sort on branch-related columns: - dso_from, symbol_from - dso_to, symbol_to - mispredict Signed-off-by: Roberto Agostino Vitillo Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: robert.richter@amd.com Cc: ming.m.lin@intel.com Cc: andi@firstfloor.org Cc: asharma@fb.com Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1328826068-11713-14-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 9b430e9..19b9092 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -153,6 +153,13 @@ OPTIONS information which may be very large and thus may clutter the display. It currently includes: cpu and numa topology of the host system. +-b:: +--branch-stack:: + Use the addresses of sampled taken branches instead of the instruction + address to build the histograms. To generate meaningful output, the + perf.data file must have been obtained using perf record -b xxx where + xxx is a branch filter option.
+ SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-annotate[1] diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 25d34d4..528789f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -53,6 +53,50 @@ struct perf_report { DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; +static int perf_report__add_branch_hist_entry(struct perf_tool *tool, + struct addr_location *al, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + struct perf_report *rep = container_of(tool, struct perf_report, tool); + struct symbol *parent = NULL; + int err = 0; + unsigned i; + struct hist_entry *he; + struct branch_info *bi; + + if ((sort__has_parent || symbol_conf.use_callchain) + && sample->callchain) { + err = machine__resolve_callchain(machine, evsel, al->thread, + sample->callchain, &parent); + if (err) + return err; + } + + bi = machine__resolve_bstack(machine, al->thread, + sample->branch_stack); + if (!bi) + return -ENOMEM; + + for (i = 0; i < sample->branch_stack->nr; i++) { + if (rep->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) + continue; + /* + * The report shows the percentage of total branches captured + * and not events sampled. Thus we use a pseudo period of 1. + */ + he = __hists__add_branch_entry(&evsel->hists, al, parent, + &bi[i], 1); + if (he) { + evsel->hists.stats.total_period += 1; + hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + } else + return -ENOMEM; + } + return err; +} + static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, struct addr_location *al, struct perf_sample *sample, @@ -126,14 +170,21 @@ static int process_sample_event(struct perf_tool *tool, if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) return 0; - if (al.map != NULL) - al.map->dso->hit = 1; + if (sort__branch_mode) { + if (perf_report__add_branch_hist_entry(tool, &al, sample, + evsel, machine)) { + pr_debug("problem adding lbr entry, skipping event\n"); + return -1; + } + } else { + if (al.map != NULL) + al.map->dso->hit = 1; - if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { - pr_debug("problem incrementing symbol period, skipping event\n"); - return -1; + if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { + pr_debug("problem incrementing symbol period, skipping event\n"); + return -1; + } } - return 0; } @@ -188,6 +239,15 @@ static int perf_report__setup_sample_type(struct perf_report *rep) } } + if (sort__branch_mode) { + if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { + fprintf(stderr, "selected -b but no branch data." + " Did you call perf record without" + " -b?\n"); + return -1; + } + } + return 0; } @@ -477,7 +537,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) OPT_BOOLEAN(0, "stdio", &report.use_stdio, "Use the stdio interface"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol, parent"), + "sort by key(s): pid, comm, dso, symbol, parent, dso_to," + " dso_from, symbol_to, symbol_from, mispredict"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu modes"), OPT_STRING('p', "parent", &parent_pattern, "regex", @@ -517,6 +578,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) "Specify disassembler style (e.g. 
-M intel for intel syntax)"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), + OPT_BOOLEAN('b', "branch-stack", &sort__branch_mode, + "use branch records for histogram filling"), OPT_END() }; @@ -537,10 +600,36 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) report.input_name = "perf.data"; } - if (strcmp(report.input_name, "-") != 0) + if (sort__branch_mode) { + if (use_browser) + fprintf(stderr, "Warning: TUI interface not supported" + " in branch mode\n"); + if (symbol_conf.dso_list_str != NULL) + fprintf(stderr, "Warning: dso filtering not supported" + " in branch mode\n"); + if (symbol_conf.sym_list_str != NULL) + fprintf(stderr, "Warning: symbol filtering not" + " supported in branch mode\n"); + + report.use_stdio = true; + use_browser = 0; setup_browser(true); - else + symbol_conf.dso_list_str = NULL; + symbol_conf.sym_list_str = NULL; + + /* + * if no sort_order is provided, then specify branch-mode + * specific order + */ + if (sort_order == default_sort_order) + sort_order = "comm,dso_from,symbol_from," + "dso_to,symbol_to"; + + } else if (strcmp(report.input_name, "-") != 0) { + setup_browser(true); + } else { use_browser = 0; + } /* * Only in the newt browser we are doing integrated annotation, -- cgit v0.10.2 From cb5d76999029ae7a517cb07dfa732c1b5a934fc2 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:21:05 +0100 Subject: perf: Add ABI reference sizes This patch adds reference sizes for revision 1 and 2 of the perf_event ABI, i.e., the size of the perf_event_attr struct. With Rev1: config2 was added = +8 bytes With Rev2: branch_sample_type was added = +8 bytes Adds the definition for Rev1, Rev2. This is useful for tools trying to decode the revision numbers based on the size of the struct. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: robert.richter@amd.com Cc: ming.m.lin@intel.com Cc: andi@firstfloor.org Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1328826068-11713-16-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fbbf5e5..bd9f55a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -192,6 +192,8 @@ enum perf_event_read_format { }; #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ +#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ +#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ /* * Hardware event_id to monitor via a performance monitoring event: -- cgit v0.10.2 From 69996df486fc3921bbaaa17fca0d68f537f9eabf Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:21:06 +0100 Subject: perf tools: Enable reading of perf.data files from different ABI rev This patch allows perf to process perf.data files generated using an ABI that has a different perf_event_attr struct size, i.e., a different ABI version. The perf_event_attr can be extended, yet perf needs to cope with older perf.data files. Similarly, perf must be able to cope with a perf.data file which is using a newer version of the ABI than what it knows about. This patch adds read_attr(), a routine that reads a perf_event_attr struct from a file incrementally based on its advertised size. If the on-file struct is smaller than what perf knows, then the extra fields are zeroed. 
If the on-file struct is bigger, then perf only uses what it knows about, the rest is skipped. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: robert.richter@amd.com Cc: ming.m.lin@intel.com Cc: andi@firstfloor.org Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1328826068-11713-17-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 9f867d9..666f189 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1973,6 +1973,51 @@ static int perf_header__read_pipe(struct perf_session *session, int fd) return 0; } +static int read_attr(int fd, struct perf_header *ph, + struct perf_file_attr *f_attr) +{ + struct perf_event_attr *attr = &f_attr->attr; + size_t sz, left; + size_t our_sz = sizeof(f_attr->attr); + int ret; + + memset(f_attr, 0, sizeof(*f_attr)); + + /* read minimal guaranteed structure */ + ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); + if (ret <= 0) { + pr_debug("cannot read %d bytes of header attr\n", + PERF_ATTR_SIZE_VER0); + return -1; + } + + /* on file perf_event_attr size */ + sz = attr->size; + if (ph->needs_swap) + sz = bswap_32(sz); + + if (sz == 0) { + /* assume ABI0 */ + sz = PERF_ATTR_SIZE_VER0; + } else if (sz > our_sz) { + pr_debug("file uses a more recent and unsupported ABI" + " (%zu bytes extra)\n", sz - our_sz); + return -1; + } + /* what we have not yet read and that we know about */ + left = sz - PERF_ATTR_SIZE_VER0; + if (left) { + void *ptr = attr; + ptr += PERF_ATTR_SIZE_VER0; + + ret = readn(fd, ptr, left); + } + /* read perf_file_section, ids are read in caller */ + ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); + + return ret <= 0 ? 
-1 : 0; +} + int perf_session__read_header(struct perf_session *session, int fd) { struct perf_header *header = &session->header; @@ -1988,19 +2033,17 @@ int perf_session__read_header(struct perf_session *session, int fd) if (session->fd_pipe) return perf_header__read_pipe(session, fd); - if (perf_file_header__read(&f_header, header, fd) < 0) { - pr_debug("incompatible file format\n"); + if (perf_file_header__read(&f_header, header, fd) < 0) return -EINVAL; - } - nr_attrs = f_header.attrs.size / sizeof(f_attr); + nr_attrs = f_header.attrs.size / f_header.attr_size; lseek(fd, f_header.attrs.offset, SEEK_SET); for (i = 0; i < nr_attrs; i++) { struct perf_evsel *evsel; off_t tmp; - if (readn(fd, &f_attr, sizeof(f_attr)) <= 0) + if (read_attr(fd, header, &f_attr) < 0) goto out_errno; if (header->needs_swap) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index bec8a32..e650de8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -24,7 +24,7 @@ static int perf_session__open(struct perf_session *self, bool force) self->fd = STDIN_FILENO; if (perf_session__read_header(self, self->fd) < 0) - pr_err("incompatible file format"); + pr_err("incompatible file format (rerun with -v to learn more)"); return 0; } @@ -56,7 +56,7 @@ static int perf_session__open(struct perf_session *self, bool force) } if (perf_session__read_header(self, self->fd) < 0) { - pr_err("incompatible file format"); + pr_err("incompatible file format (rerun with -v to learn more)"); goto out_close; } -- cgit v0.10.2 From 62db90681c0db8c76e5db006b929a2edd5d12ae6 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:21:07 +0100 Subject: perf tools: Fix ABI compatibility bug in print_event_desc() This patch cleans up local variable types for msz and ret. They need to be size_t and ssize_t respectively. It also fixes a bug whereby perf would not read an attr struct with a different size than what it knows about. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: robert.richter@amd.com Cc: ming.m.lin@intel.com Cc: andi@firstfloor.org Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1328826068-11713-18-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 666f189..6d58026 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1144,8 +1144,9 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) uint64_t id; void *buf = NULL; char *str; - u32 nre, sz, nr, i, j, msz; - int ret; + u32 nre, sz, nr, i, j; + ssize_t ret; + size_t msz; /* number of events */ ret = read(fd, &nre, sizeof(nre)); @@ -1162,25 +1163,23 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) if (ph->needs_swap) sz = bswap_32(sz); - /* - * ensure it is at least to our ABI rev - */ - if (sz < (u32)sizeof(attr)) - goto error; - memset(&attr, 0, sizeof(attr)); - /* read entire region to sync up to next field */ + /* buffer to hold on file attr struct */ buf = malloc(sz); if (!buf) goto error; msz = sizeof(attr); - if (sz < msz) + if (sz < (ssize_t)msz) msz = sz; for (i = 0 ; i < nre; i++) { + /* + * must read entire on-file attr struct to + * sync up with layout.
+ */ + ret = read(fd, buf, sz); if (ret != (ssize_t)sz) goto error; -- cgit v0.10.2 From 114382a0aea97974803c942f106d462cbca5c64d Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 9 Feb 2012 23:21:08 +0100 Subject: perf tools: Make perf able to read files from older ABIs This patch provides a way to handle legacy perf.data files. Legacy files are those using the older PERFFILE signature. For those, it is still necessary to detect endianness but without comparing their header->attr_size with the tool's own version as it may be different. Instead, we use a reference table for all known sizes from the legacy era. We try all the combinations for sizes and endianness. If we find a match, we proceed, otherwise we return: "incompatible file format". This is also done for the pipe-mode file format. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: robert.richter@amd.com Cc: ming.m.lin@intel.com Cc: andi@firstfloor.org Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1328826068-11713-19-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 6d58026..c851495 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1803,35 +1803,101 @@ out_free: return err; } -static int check_magic_endian(u64 *magic, struct perf_file_header *header, - struct perf_header *ph) +static const int attr_file_abi_sizes[] = { + [0] = PERF_ATTR_SIZE_VER0, + [1] = PERF_ATTR_SIZE_VER1, + 0, +}; + +/* + * In the legacy file format, the magic number is not used to encode endianness. + * hdr_sz was used to encode endianness. But given that hdr_sz can vary based + * on ABI revisions, we need to try all combinations for all endianness to + * detect the endianness. + */ +static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) { - int ret; + uint64_t ref_size, attr_size; + int i; - /* check for legacy format */ - ret = memcmp(magic, __perf_magic1, sizeof(*magic)); - if (ret == 0) { - pr_debug("legacy perf.data format\n"); - if (!header) - return -1; + for (i = 0 ; attr_file_abi_sizes[i]; i++) { + ref_size = attr_file_abi_sizes[i] + + sizeof(struct perf_file_section); + if (hdr_sz != ref_size) { + attr_size = bswap_64(hdr_sz); + if (attr_size != ref_size) + continue; - if (header->attr_size != sizeof(struct perf_file_attr)) { - u64 attr_size = bswap_64(header->attr_size); + ph->needs_swap = true; + } + pr_debug("ABI%d perf.data file detected, need_swap=%d\n", + i, + ph->needs_swap); + return 0; + } + /* could not determine endianness */ + return -1; +} - if (attr_size != sizeof(struct perf_file_attr)) - return -1; +#define PERF_PIPE_HDR_VER0 16 + +static const size_t attr_pipe_abi_sizes[] = { + [0] = PERF_PIPE_HDR_VER0, + 0, +}; + +/* + * In the legacy pipe format, there is an implicit assumption that endiannesss + * between host recording the samples, and host parsing the samples is the + * same. This is not always the case given that the pipe output may always be + * redirected into a file and analyzed on a different machine with possibly a + * different endianness and perf_event ABI revsions in the perf tool itself.
+ */ +static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) +{ + u64 attr_size; + int i; + + for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { + if (hdr_sz != attr_pipe_abi_sizes[i]) { + attr_size = bswap_64(hdr_sz); + if (attr_size != hdr_sz) + continue; ph->needs_swap = true; } + pr_debug("Pipe ABI%d perf.data file detected\n", i); return 0; } return -1; +} + +static int check_magic_endian(u64 magic, uint64_t hdr_sz, + bool is_pipe, struct perf_header *ph) +{ + int ret; + + /* check for legacy format */ + ret = memcmp(&magic, __perf_magic1, sizeof(magic)); + if (ret == 0) { + pr_debug("legacy perf.data format\n"); + if (is_pipe) + return try_all_pipe_abis(hdr_sz, ph); + + return try_all_file_abis(hdr_sz, ph); + } + /* + * the new magic number serves two purposes: + * - unique number to identify actual perf.data files + * - encode endianness of file + */ - /* check magic number with same endianness */ - if (*magic == __perf_magic2) + /* check magic number with one endianness */ + if (magic == __perf_magic2) return 0; - /* check magic number but opposite endianness */ - if (*magic != __perf_magic2_sw) + /* check magic number with opposite endianness */ + if (magic != __perf_magic2_sw) return -1; ph->needs_swap = true; @@ -1850,8 +1916,11 @@ int perf_file_header__read(struct perf_file_header *header, if (ret <= 0) return -1; - if (check_magic_endian(&header->magic, header, ph) < 0) + if (check_magic_endian(header->magic, + header->attr_size, false, ph) < 0) { + pr_debug("magic/endian check failed\n"); return -1; + } if (ph->needs_swap) { mem_bswap_64(header, offsetof(struct perf_file_header, @@ -1938,21 +2007,17 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, if (ret <= 0) return -1; - if (check_magic_endian(&header->magic, NULL, ph) < 0) + if (check_magic_endian(header->magic, header->size, true, ph) < 0) { + pr_debug("endian/magic failed\n"); return -1; + } + + if (ph->needs_swap) + header->size = bswap_64(header->size); if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) return -1; - if (header->size != sizeof(*header)) { - u64 size = bswap_64(header->size); - - if (size != sizeof(*header)) - return -1; - - ph->needs_swap = true; - } - return 0; } @@ -1992,6 +2057,7 @@ static int read_attr(int fd, struct perf_header *ph, /* on file perf_event_attr size */ sz = attr->size; + if (ph->needs_swap) sz = bswap_32(sz); -- cgit v0.10.2 From a5aabdacde9caff54886ae454e0fad2f26929753 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 8 Mar 2012 23:47:45 +0100 Subject: perf record: Provide default branch stack sampling mode option This patch changes the logic of the -b, --branch-stack options of perf record. Based on users' requests, the patch provides a default filter mode with the -b (or --branch-any) option. With this option, any type of taken branch is sampled. With -j (or --branch-filter), the user can specify any valid combination of branch types and privilege levels, if supported by the underlying hardware. The -b (--branch-any) option is a shortcut for --branch-filter any.
$ perf record -b foo or: $ perf record --branch-filter any foo For more specific filtering: $ perf record --branch-filter ind_call,u foo Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1331246868-19905-2-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 60bddaf..a1386b2 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -153,14 +153,19 @@ corresponding events, i.e., they always refer to events defined earlier on the c line. -b:: ---branch-stack:: +--branch-any:: +Enable taken branch stack sampling. Any type of taken branch may be sampled. +This is a shortcut for --branch-filter any. See --branch-filter for more infos. + +-j:: +--branch-filter:: Enable taken branch stack sampling. Each sample captures a series of consecutive taken branches. The number of branches captured with each sample depends on the underlying hardware, the type of branches of interest, and the executed code. It is possible to select the types of branches captured by enabling filters. The following filters are defined: - - any : any type of branches + - any: any type of branches - any_call: any function call or system call - any_ret: any function return or system call return - any_ind: any indirect branch @@ -169,13 +174,13 @@ following filters are defined: - hv: only when the target is at the hypervisor level + -At least one of any, any_call, any_ret, any_ind must be provided. The privilege levels may -be ommitted, in which case, the privilege levels of the associated event are applied to the -branch filter. Both kernel (k) and hypervisor (hv) privilege levels are subject to -permissions. When sampling on multiple events, branch stack sampling is enabled for all -the sampling events. The sampled branch type is the same for all events. -Note that taken branch sampling may not be available on all processors. -The various filters must be specified as a comma separated list: -b any_ret,u,k +The option requires at least one branch type among any, any_call, any_ret, ind_call. +The privilege levels may be ommitted, in which case, the privilege levels of the associated +event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege +levels are subject to permissions. When sampling on multiple events, branch stack sampling +is enabled for all the sampling events. The sampled branch type is the same for all events. +The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k +Note that this feature may not be available on all processors. 
SEE ALSO -------- diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 1c49d4e..a7c53a9 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -660,7 +660,7 @@ static const struct branch_mode branch_modes[] = { }; static int -parse_branch_stack(const struct option *opt, const char *str, int unset __used) +parse_branch_stack(const struct option *opt, const char *str, int unset) { #define ONLY_PLM \ (PERF_SAMPLE_BRANCH_USER |\ @@ -669,40 +669,53 @@ parse_branch_stack(const struct option *opt, const char *str, int unset __used) uint64_t *mode = (uint64_t *)opt->value; const struct branch_mode *br; - char *s, *os, *p; + char *s, *os = NULL, *p; int ret = -1; - *mode = 0; + if (unset) + return 0; - /* because str is read-only */ - s = os = strdup(str); - if (!s) + /* + * cannot set it twice, -b + --branch-filter for instance + */ + if (*mode) return -1; - for (;;) { - p = strchr(s, ','); - if (p) - *p = '\0'; - - for (br = branch_modes; br->name; br++) { - if (!strcasecmp(s, br->name)) - break; - } - if (!br->name) - goto error; + /* str may be NULL in case no arg is passed to -b */ + if (str) { + /* because str is read-only */ + s = os = strdup(str); + if (!s) + return -1; + + for (;;) { + p = strchr(s, ','); + if (p) + *p = '\0'; + + for (br = branch_modes; br->name; br++) { + if (!strcasecmp(s, br->name)) + break; + } + if (!br->name) { + ui__warning("unknown branch filter %s," + " check man page\n", s); + goto error; + } - *mode |= br->mode; + *mode |= br->mode; - if (!p) - break; + if (!p) + break; - s = p + 1; + s = p + 1; + } } ret = 0; + /* default to any branch */ if ((*mode & ~ONLY_PLM) == 0) { - error("need at least one branch type with -b\n"); - ret = -1; + *mode = PERF_SAMPLE_BRANCH_ANY; } error: free(os); @@ -798,8 +811,13 @@ const struct option record_options[] = { "monitor event in cgroup name only", parse_cgroups), OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"), - OPT_CALLBACK('b', "branch-stack", &record.opts.branch_stack, - "branch mode mask", "branch stack sampling modes", + + OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack, + "branch any", "sample any taken branches", + parse_branch_stack), + + OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack, + "branch filter mask", "branch stack filter modes", parse_branch_stack), OPT_END() }; -- cgit v0.10.2 From 330aa675b4f92a422cb6d3acbbfd16a628017520 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 8 Mar 2012 23:47:46 +0100 Subject: perf record: Add HEADER_BRANCH_STACK tag This patch adds a new feature bit, namely, HEADER_BRANCH_STACK. When present, it indicates that sample records may contain branch stack. This could be useful to a viewer to switch to branch mode without having to parse all the samples or without a specific cmdline option. This will be used in a subsequent patch to enhance perf report with branch stacks. 
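To illustrate the feature-bit idea, here is a toy model of a HEADER_FEAT_BITS-style bitmap: the record side sets one bit, the report side tests it without touching any samples. The helper names and the bit index are invented for the example and are not perf's API.

#include <stdio.h>
#include <string.h>

#define FEAT_BITS	256		/* mirrors HEADER_FEAT_BITS */
#define FEAT_BRANCH_STACK	14	/* invented bit index, for illustration */
#define LONG_BITS	(8 * sizeof(unsigned long))

struct feat_set {
	unsigned long bits[FEAT_BITS / (8 * sizeof(unsigned long))];
};

/* Record side: mark that samples may carry branch stacks. */
static void feat_set_bit(struct feat_set *s, unsigned int f)
{
	s->bits[f / LONG_BITS] |= 1UL << (f % LONG_BITS);
}

/* Report side: decide on branch mode from the header alone. */
static int feat_has_bit(const struct feat_set *s, unsigned int f)
{
	return !!(s->bits[f / LONG_BITS] & (1UL << (f % LONG_BITS)));
}

int main(void)
{
	struct feat_set header;

	memset(&header, 0, sizeof(header));
	feat_set_bit(&header, FEAT_BRANCH_STACK);
	printf("may contain branch stacks: %d\n",
	       feat_has_bit(&header, FEAT_BRANCH_STACK));
	return 0;
}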
Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1331246868-19905-3-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a7c53a9..be4e1ee 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -473,6 +473,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) if (!have_tracepoints(&evsel_list->entries)) perf_header__clear_feat(&session->header, HEADER_TRACE_INFO); + if (!rec->opts.branch_stack) + perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); + if (!rec->file_new) { err = perf_session__read_header(session, output); if (err < 0) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index c851495..0d9b6da 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1023,6 +1023,12 @@ write_it: return do_write_string(fd, buffer); } +static int write_branch_stack(int fd __used, struct perf_header *h __used, + struct perf_evlist *evlist __used) +{ + return 0; +} + static void print_hostname(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); @@ -1315,6 +1321,12 @@ static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) free(str); } +static void print_branch_stack(struct perf_header *ph __used, int fd __used, + FILE *fp) +{ + fprintf(fp, "# contains samples with branch stack\n"); +} + static int __event_process_build_id(struct build_id_event *bev, char *filename, struct perf_session *session) @@ -1519,6 +1531,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPA(HEADER_CMDLINE, cmdline), FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), + FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), }; struct header_print_data { diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index e68f617..21a6be0 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -27,7 +27,7 @@ enum { HEADER_EVENT_DESC, HEADER_CPU_TOPOLOGY, HEADER_NUMA_TOPOLOGY, - + HEADER_BRANCH_STACK, HEADER_LAST_FEATURE, HEADER_FEAT_BITS = 256, }; -- cgit v0.10.2 From 993ac88d5892629fbe1f8a54857f9947f49f0d96 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 8 Mar 2012 23:47:47 +0100 Subject: perf report: Auto-detect branch stack sampling mode This patch enhances perf report to auto-detect when the perf.data file contains samples with branch stacks. That way it is not necessary to use the -b option. To force branch view mode to off, simply use --no-branch-stack. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1331246868-19905-4-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 19b9092..87feeee 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -157,8 +157,11 @@ OPTIONS --branch-stack:: Use the addresses of sampled taken branches instead of the instruction address to build the histograms. 
To generate meaningful output, the - perf.data file must have been obtained using perf record -b xxx where - xxx is a branch filter option. + perf.data file must have been obtained using perf record -b or + perf record --branch-filter xxx where xxx is a branch filter option. + perf report is able to auto-detect whether a perf.data file contains + branch stacks and it will automatically switch to the branch view mode, + unless --no-branch-stack is used. SEE ALSO -------- diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 528789f..66e8523 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -170,7 +170,7 @@ static int process_sample_event(struct perf_tool *tool, if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) return 0; - if (sort__branch_mode) { + if (sort__branch_mode == 1) { if (perf_report__add_branch_hist_entry(tool, &al, sample, evsel, machine)) { pr_debug("problem adding lbr entry, skipping event\n"); @@ -239,7 +239,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) } } - if (sort__branch_mode) { + if (sort__branch_mode == 1) { if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { fprintf(stderr, "selected -b but no branch data." " Did you call perf record without" @@ -306,7 +306,7 @@ static int __cmd_report(struct perf_report *rep) { int ret = -EINVAL; u64 nr_samples; - struct perf_session *session; + struct perf_session *session = rep->session; struct perf_evsel *pos; struct map *kernel_map; struct kmap *kernel_kmap; @@ -314,13 +314,6 @@ static int __cmd_report(struct perf_report *rep) signal(SIGINT, sig_handler); - session = perf_session__new(rep->input_name, O_RDONLY, - rep->force, false, &rep->tool); - if (session == NULL) - return -ENOMEM; - - rep->session = session; - if (rep->cpu_list) { ret = perf_session__cpu_bitmap(session, rep->cpu_list, rep->cpu_bitmap); @@ -487,9 +480,19 @@ setup: return 0; } +static int +parse_branch_mode(const struct option *opt __used, const char *str __used, int unset) +{ + sort__branch_mode = !unset; + return 0; +} + int cmd_report(int argc, const char **argv, const char *prefix __used) { + struct perf_session *session; struct stat st; + bool has_br_stack = false; + int ret = -1; char callchain_default_opt[] = "fractal,0.5,callee"; const char * const report_usage[] = { "perf report []", @@ -578,8 +581,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) "Specify disassembler style (e.g. 
-M intel for intel syntax)"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), - OPT_BOOLEAN('b', "branch-stack", &sort__branch_mode, - "use branch records for histogram filling"), + OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", + "use branch records for histogram filling", parse_branch_mode), OPT_END() }; @@ -599,8 +602,20 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) else report.input_name = "perf.data"; } + session = perf_session__new(report.input_name, O_RDONLY, + report.force, false, &report.tool); + if (session == NULL) + return -ENOMEM; + + report.session = session; + + has_br_stack = perf_header__has_feat(&session->header, + HEADER_BRANCH_STACK); - if (sort__branch_mode) { + if (sort__branch_mode == -1 && has_br_stack) + sort__branch_mode = 1; + + if (sort__branch_mode == 1) { if (use_browser) fprintf(stderr, "Warning: TUI interface not supported" " in branch mode\n"); @@ -657,13 +672,13 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) } if (symbol__init() < 0) - return -1; + goto error; setup_sorting(report_usage, options); if (parent_pattern != default_parent_pattern) { if (sort_dimension__add("parent") < 0) - return -1; + goto error; /* * Only show the parent fields if we explicitly @@ -685,5 +700,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); - return __cmd_report(&report); + ret = __cmd_report(&report); +error: + perf_session__delete(session); + return ret; } diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 2739ed1..88dbcf6 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -8,7 +8,7 @@ const char default_sort_order[] = "comm,dso,symbol"; const char *sort_order = default_sort_order; int sort__need_collapse = 0; int sort__has_parent = 0; -bool sort__branch_mode; +int sort__branch_mode = -1; /* -1 = means not set */ enum sort_type sort__first_dimension; diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 7aa72a0..8505b9b 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -31,7 +31,7 @@ extern const char *parent_pattern; extern const char default_sort_order[]; extern int sort__need_collapse; extern int sort__has_parent; -extern bool sort__branch_mode; +extern int sort__branch_mode; extern char *field_sep; extern struct sort_entry sort_comm; extern struct sort_entry sort_dso; -- cgit v0.10.2 From a68c2c58171391ef368fced32a555b2f0ff106e5 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 8 Mar 2012 23:47:48 +0100 Subject: perf report: Enable TUI in branch view mode This patch updates perf report to support TUI mode when the perf.data file contains samples with branch stacks. For each row in the report, it is possible to annotate either the source or target of each branch. 
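Conceptually, each histogram row now carries two annotatable ends. The sketch below shows that selection with invented stand-in types, not perf's actual branch_info structures; the TUI popup in the hunk below does the real work.

#include <stdio.h>

/* Stand-ins for perf's branch source/target symbols. */
struct end_sym {
	const char *sym;
};

struct branch_ends {
	struct end_sym from;
	struct end_sym to;
};

/* Pick which end of the branch to annotate, as the popup menu offers. */
static const char *annotate_target(const struct branch_ends *bi, int want_from)
{
	return want_from ? bi->from.sym : bi->to.sym;
}

int main(void)
{
	struct branch_ends bi = { { "f1" }, { "f2" } };

	printf("Annotate %s\n", annotate_target(&bi, 1));	/* branch source */
	printf("Annotate %s\n", annotate_target(&bi, 0));	/* branch target */
	return 0;
}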
Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1331246868-19905-5-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 66e8523..8e91c6e 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -64,7 +64,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, int err = 0; unsigned i; struct hist_entry *he; - struct branch_info *bi; + struct branch_info *bi, *bx; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { @@ -87,13 +87,45 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, * and not events sampled. Thus we use a pseudo period of 1. */ he = __hists__add_branch_entry(&evsel->hists, al, parent, - &bi[i], 1); + &bi[i], 1); if (he) { + struct annotation *notes; + err = -ENOMEM; + bx = he->branch_info; + if (bx->from.sym && use_browser > 0) { + notes = symbol__annotation(bx->from.sym); + if (!notes->src + && symbol__alloc_hist(bx->from.sym) < 0) + goto out; + + err = symbol__inc_addr_samples(bx->from.sym, + bx->from.map, + evsel->idx, + bx->from.al_addr); + if (err) + goto out; + } + + if (bx->to.sym && use_browser > 0) { + notes = symbol__annotation(bx->to.sym); + if (!notes->src + && symbol__alloc_hist(bx->to.sym) < 0) + goto out; + + err = symbol__inc_addr_samples(bx->to.sym, + bx->to.map, + evsel->idx, + bx->to.al_addr); + if (err) + goto out; + } evsel->hists.stats.total_period += 1; hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + err = 0; } else return -ENOMEM; } +out: return err; } @@ -615,32 +647,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) if (sort__branch_mode == -1 && has_br_stack) sort__branch_mode = 1; + /* sort__branch_mode could be 0 if --no-branch-stack */ if (sort__branch_mode == 1) { - if (use_browser) - fprintf(stderr, "Warning: TUI interface not supported" - " in branch mode\n"); - if (symbol_conf.dso_list_str != NULL) - fprintf(stderr, "Warning: dso filtering not supported" - " in branch mode\n"); - if (symbol_conf.sym_list_str != NULL) - fprintf(stderr, "Warning: symbol filtering not" - " supported in branch mode\n"); - - report.use_stdio = true; - use_browser = 0; - setup_browser(true); - symbol_conf.dso_list_str = NULL; - symbol_conf.sym_list_str = NULL; - /* - * if no sort_order is provided, then specify branch-mode - * specific order + * if no sort_order is provided, then specify + * branch-mode specific order */ if (sort_order == default_sort_order) sort_order = "comm,dso_from,symbol_from," "dso_to,symbol_to"; - } else if (strcmp(report.input_name, "-") != 0) { + } + + if (strcmp(report.input_name, "-") != 0) { setup_browser(true); } else { use_browser = 0; @@ -696,9 +715,17 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) if (argc) usage_with_options(report_usage, options); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); - sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); + + if (sort__branch_mode == 1) { + sort_entry__setup_elide(&sort_dso_from, symbol_conf.dso_from_list, "dso_from", stdout); + sort_entry__setup_elide(&sort_dso_to, symbol_conf.dso_to_list, "dso_to", stdout); + sort_entry__setup_elide(&sort_sym_from, 
symbol_conf.sym_from_list, "sym_from", stdout); + sort_entry__setup_elide(&sort_sym_to, symbol_conf.sym_to_list, "sym_to", stdout); + } else { + sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); + sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); + } ret = __cmd_report(&report); error: diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e650de8..002ebbf 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -263,6 +263,7 @@ static void ip__resolve_ams(struct machine *self, struct thread *thread, } found: ams->addr = ip; + ams->al_addr = al.addr; ams->sym = al.sym; ams->map = al.map; } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 8505b9b..472aa5a 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -37,8 +37,10 @@ extern struct sort_entry sort_comm; extern struct sort_entry sort_dso; extern struct sort_entry sort_sym; extern struct sort_entry sort_parent; -extern struct sort_entry sort_lbr_dso; -extern struct sort_entry sort_lbr_sym; +extern struct sort_entry sort_dso_from; +extern struct sort_entry sort_dso_to; +extern struct sort_entry sort_sym_from; +extern struct sort_entry sort_sym_to; extern enum sort_type sort__first_dimension; /** diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5866ce6..ac49ef2 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -97,7 +97,11 @@ struct symbol_conf { *col_width_list_str; struct strlist *dso_list, *comm_list, - *sym_list; + *sym_list, + *dso_from_list, + *dso_to_list, + *sym_from_list, + *sym_to_list; const char *symfs; }; @@ -125,6 +129,7 @@ struct addr_map_symbol { struct map *map; struct symbol *sym; u64 addr; + u64 al_addr; }; struct branch_info { diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index bfba049..951e2e9 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c @@ -805,8 +805,11 @@ static struct hist_browser *hist_browser__new(struct hists *hists) self->hists = hists; self->b.refresh = hist_browser__refresh; self->b.seek = ui_browser__hists_seek; - self->b.use_navkeypressed = true, - self->has_symbols = sort_sym.list.next != NULL; + self->b.use_navkeypressed = true; + if (sort__branch_mode == 1) + self->has_symbols = sort_sym_from.list.next != NULL; + else + self->has_symbols = sort_sym.list.next != NULL; } return self; @@ -861,6 +864,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, { struct hists *self = &evsel->hists; struct hist_browser *browser = hist_browser__new(self); + struct branch_info *bi; struct pstack *fstack; int key = -1; @@ -879,7 +883,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, char *options[16]; int nr_options = 0, choice = 0, i, annotate = -2, zoom_dso = -2, zoom_thread = -2, - browse_map = -2; + annotate_f = -2, annotate_t = -2, browse_map = -2; key = hist_browser__run(browser, ev_name, timer, arg, delay_secs); @@ -887,7 +891,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, thread = hist_browser__selected_thread(browser); dso = browser->selection->map ? 
browser->selection->map->dso : NULL; } - switch (key) { case K_TAB: case K_UNTAB: @@ -902,7 +905,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, if (!browser->has_symbols) { ui_browser__warning(&browser->b, delay_secs * 2, "Annotation is only available for symbolic views, " - "include \"sym\" in --sort to use it."); + "include \"sym*\" in --sort to use it."); continue; } @@ -972,12 +975,32 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, if (!browser->has_symbols) goto add_exit_option; - if (browser->selection != NULL && - browser->selection->sym != NULL && - !browser->selection->map->dso->annotate_warned && - asprintf(&options[nr_options], "Annotate %s", - browser->selection->sym->name) > 0) - annotate = nr_options++; + if (sort__branch_mode == 1) { + bi = browser->he_selection->branch_info; + if (browser->selection != NULL && + bi && + bi->from.sym != NULL && + !bi->from.map->dso->annotate_warned && + asprintf(&options[nr_options], "Annotate %s", + bi->from.sym->name) > 0) + annotate_f = nr_options++; + + if (browser->selection != NULL && + bi && + bi->to.sym != NULL && + !bi->to.map->dso->annotate_warned && + asprintf(&options[nr_options], "Annotate %s", + bi->to.sym->name) > 0) + annotate_t = nr_options++; + } else { + + if (browser->selection != NULL && + browser->selection->sym != NULL && + !browser->selection->map->dso->annotate_warned && + asprintf(&options[nr_options], "Annotate %s", + browser->selection->sym->name) > 0) + annotate = nr_options++; + } if (thread != NULL && asprintf(&options[nr_options], "Zoom %s %s(%d) thread", @@ -1010,13 +1033,28 @@ add_exit_option: if (choice == -1) continue; - if (choice == annotate) { + if (choice == annotate || choice == annotate_t || choice == annotate_f) { struct hist_entry *he; int err; do_annotate: he = hist_browser__selected_entry(browser); if (he == NULL) continue; + + /* + * we stash the branch_info symbol + map into the + * the ms so we don't have to rewrite all the annotation + * code to use branch_info. + * in branch mode, the ms struct is not used + */ + if (choice == annotate_f) { + he->ms.sym = he->branch_info->from.sym; + he->ms.map = he->branch_info->from.map; + } else if (choice == annotate_t) { + he->ms.sym = he->branch_info->to.sym; + he->ms.map = he->branch_info->to.map; + } + /* * Don't let this be freed, say, by hists__decay_entry. */ -- cgit v0.10.2 From f9b4eeb809c6d031cc9561cc34dd691701cb2c2a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Mar 2012 12:44:35 +0100 Subject: perf/x86: Prettify pmu config literals I got somewhat tired of having to decode hex numbers.. Signed-off-by: Peter Zijlstra Acked-by: Thomas Gleixner Cc: Stephane Eranian Cc: Robert Richter Link: http://lkml.kernel.org/n/tip-0vsy1sgywc4uar3mu1szm0rg@git.kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 82db83b..66fda0c 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -268,6 +268,29 @@ struct x86_pmu_quirk { void (*func)(void); }; +union x86_pmu_config { + struct { + u64 event:8, + umask:8, + usr:1, + os:1, + edge:1, + pc:1, + interrupt:1, + __reserved1:1, + en:1, + inv:1, + cmask:8, + event2:4, + __reserved2:4, + go:1, + ho:1; + } bits; + u64 value; +}; + +#define X86_CONFIG(args...) 
((union x86_pmu_config){.bits = {args}}).value + /* * struct x86_pmu - generic x86 pmu */ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 61d4f79..4bd9c9e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1288,7 +1288,8 @@ static int intel_pmu_hw_config(struct perf_event *event) * * Thereby we gain a PEBS capable cycle counter. */ - u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */ + u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); + alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); event->hw.config = alt_config; @@ -1690,9 +1691,11 @@ __init int intel_pmu_init(void) x86_pmu.extra_regs = intel_nehalem_extra_regs; /* UOPS_ISSUED.STALLED_CYCLES */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = + X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); x86_add_quirk(intel_nehalem_quirk); @@ -1727,9 +1730,11 @@ __init int intel_pmu_init(void) x86_pmu.er_flags |= ERF_HAS_RSP_1; /* UOPS_ISSUED.STALLED_CYCLES */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = + X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); pr_cont("Westmere events, "); break; @@ -1750,9 +1755,11 @@ __init int intel_pmu_init(void) x86_pmu.er_flags |= ERF_NO_HT_SHARING; /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = + X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); pr_cont("SandyBridge events, "); break; -- cgit v0.10.2 From 8bcd65fd29e9abfb633bfd6eedaf24b22e211f3a Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 12 Mar 2012 16:13:29 +0100 Subject: perf report: Remove duplicate annotate choice in branch view mode This patch removes the duplicated annotate selection when browsing in branch view mode. If the sym and dso of the branch source and target are the same, then only one annotate choice is proposed.
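The dedup test is simply "same symbol and same dso at both ends of the branch". Here is a standalone sketch of the condition with stand-in types; the actual check is the pair of extra comparisons added to the hists.c hunk below.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for one resolved end of a branch. */
struct branch_end {
	const void *sym;
	const void *dso;
};

/* Offer a second "Annotate" entry only when the two ends differ. */
static bool ends_differ(const struct branch_end *from, const struct branch_end *to)
{
	return to->sym != from->sym || to->dso != from->dso;
}

int main(void)
{
	struct branch_end from = { (const void *)0x1, (const void *)0x10 };
	struct branch_end to = { (const void *)0x1, (const void *)0x10 };

	/* Same sym and dso at both ends: only one annotate choice. */
	printf("second annotate entry: %s\n", ends_differ(&from, &to) ? "yes" : "no");
	return 0;
}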
Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1331565210-10865-2-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index 951e2e9..40fd424 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c @@ -989,6 +989,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, bi && bi->to.sym != NULL && !bi->to.map->dso->annotate_warned && + (bi->to.sym != bi->from.sym || + bi->to.map->dso != bi->from.map->dso) && asprintf(&options[nr_options], "Annotate %s", bi->to.sym->name) > 0) annotate_t = nr_options++; -- cgit v0.10.2 From 24bff2dc0f77b1f186b7bdf30060caf3df191a68 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 12 Mar 2012 16:13:30 +0100 Subject: perf report: Fix annotate double quit issue in branch view mode This patch fixes perf report to not go back two levels when pressing the 'q' key while annotating in branch view mode. When pressing 'q' in annotate mode and if the branch source and target belong to different functions, perf now brings up the annotation popup menu again to offer the option to annotate the other branch source or target. As part of the code restructuring in perf_evsel__hists_browse() we also fix a memory leak on options[] in case of error. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: acme@redhat.com Cc: asharma@fb.com Cc: ravitillo@lbl.gov Cc: vweaver1@eecs.utk.edu Cc: khandual@linux.vnet.ibm.com Cc: dsahern@gmail.com Link: http://lkml.kernel.org/r/1331565210-10865-3-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index 40fd424..de8ece8 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c @@ -856,6 +856,16 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size, return printed; } +static inline void free_popup_options(char **options, int n) +{ + int i; + + for (i = 0; i < n; ++i) { + free(options[i]); + options[i] = NULL; + } +} + static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, const char *helpline, const char *ev_name, bool left_exits, @@ -866,6 +876,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, struct hist_browser *browser = hist_browser__new(self); struct branch_info *bi; struct pstack *fstack; + char *options[16]; + int nr_options = 0; int key = -1; if (browser == NULL) @@ -877,14 +889,17 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, ui_helpline__push(helpline); + memset(options, 0, sizeof(options)); + while (1) { const struct thread *thread = NULL; const struct dso *dso = NULL; - char *options[16]; - int nr_options = 0, choice = 0, i, + int choice = 0, annotate = -2, zoom_dso = -2, zoom_thread = -2, annotate_f = -2, annotate_t = -2, browse_map = -2; + nr_options = 0; + key = hist_browser__run(browser, ev_name, timer, arg, delay_secs); if (browser->he_selection != NULL) { @@ -1023,17 +1038,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, browse_map = nr_options++; add_exit_option: options[nr_options++] = (char *)"Exit"; - +retry_popup_menu: choice = ui__popup_menu(nr_options, options); - for (i = 0; i < nr_options - 1; ++i) 
- free(options[i]); - if (choice == nr_options - 1) break; - if (choice == -1) + if (choice == -1) { + free_popup_options(options, nr_options - 1); continue; + } if (choice == annotate || choice == annotate_t || choice == annotate_f) { struct hist_entry *he; @@ -1064,9 +1078,18 @@ do_annotate: err = hist_entry__tui_annotate(he, evsel->idx, timer, arg, delay_secs); he->used = false; + /* + * offer option to annotate the other branch source or target + * (if they exists) when returning from annotate + */ + if ((err == 'q' || err == CTRL('c')) + && annotate_t != -2 && annotate_f != -2) + goto retry_popup_menu; + ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); if (err) ui_browser__handle_resize(&browser->b); + } else if (choice == browse_map) map__browse(browser->selection->map); else if (choice == zoom_dso) { @@ -1112,6 +1135,7 @@ out_free_stack: pstack__delete(fstack); out: hist_browser__delete(browser); + free_popup_options(options, nr_options - 1); return key; } -- cgit v0.10.2
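For tools consuming perf.data files, the ABI reference sizes introduced earlier in this series are enough to infer a revision from perf_event_attr.size. The sketch below assumes exactly that; attr_size_to_abi() is a made-up helper, and the zero-size fallback mirrors the "assume ABI0" case in read_attr().

#include <stdio.h>

#define PERF_ATTR_SIZE_VER0 64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1 72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2 80	/* add: branch_sample_type */

/* Map an on-file perf_event_attr size to the ABI revision it implies;
 * returns -1 for sizes newer than this table knows about. */
static int attr_size_to_abi(unsigned int sz)
{
	if (sz == 0)	/* legacy files may leave the size zeroed */
		sz = PERF_ATTR_SIZE_VER0;

	switch (sz) {
	case PERF_ATTR_SIZE_VER0: return 0;
	case PERF_ATTR_SIZE_VER1: return 1;
	case PERF_ATTR_SIZE_VER2: return 2;
	default: return -1;
	}
}

int main(void)
{
	printf("attr.size 80 -> ABI rev %d\n", attr_size_to_abi(80));
	return 0;
}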