From 3f6da3905398826d85731247e7fbcf53400c18bd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Mar 2010 13:01:18 +0100 Subject: perf: Rework and fix the arch CPU-hotplug hooks Remove the hw_perf_event_*() hotplug hooks in favour of per PMU hotplug notifiers. This has the advantage of reducing the static weak interface as well as exposing all hotplug actions to the PMU. Use this to fix x86 hotplug usage where we did things in ONLINE which should have been done in UP_PREPARE or STARTING. Signed-off-by: Peter Zijlstra Cc: Paul Mundt Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100305154128.736225361@chello.nl> Signed-off-by: Ingo Molnar diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 5120bd4..fbe101d 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1287,7 +1287,7 @@ static void perf_event_interrupt(struct pt_regs *regs) irq_exit(); } -void hw_perf_event_setup(int cpu) +static void power_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); @@ -1297,6 +1297,23 @@ void hw_perf_event_setup(int cpu) cpuhw->mmcr[0] = MMCR0_FC; } +static int __cpuinit +power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + power_pmu_setup(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + int register_power_pmu(struct power_pmu *pmu) { if (ppmu) @@ -1314,5 +1331,7 @@ int register_power_pmu(struct power_pmu *pmu) freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ + perf_cpu_notifier(power_pmu_notifier); + return 0; } diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 7ff0943..9f253e9 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -275,13 +275,30 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) return &pmu; } -void hw_perf_event_setup(int cpu) +static void sh_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(struct cpu_hw_events)); } +static int __cpuinit +sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + sh_pmu_setup(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + void hw_perf_enable(void) { if (!sh_pmu_initialized()) @@ -308,5 +325,6 @@ int register_sh_pmu(struct sh_pmu *pmu) WARN_ON(pmu->num_events > MAX_HWEVENTS); + perf_cpu_notifier(sh_pmu_notifier); return 0; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 42aafd1..585d560 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -157,6 +157,11 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; + + void (*cpu_prepare)(int cpu); + void (*cpu_starting)(int cpu); + void (*cpu_dying)(int cpu); + void (*cpu_dead)(int cpu); }; static struct x86_pmu x86_pmu __read_mostly; @@ -293,7 +298,7 @@ static inline bool bts_available(void) return x86_pmu.enable_bts != NULL; } -static inline void init_debug_store_on_cpu(int cpu) +static void init_debug_store_on_cpu(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -305,7 +310,7 @@ static inline void 
init_debug_store_on_cpu(int cpu) (u32)((u64)(unsigned long)ds >> 32)); } -static inline void fini_debug_store_on_cpu(int cpu) +static void fini_debug_store_on_cpu(int cpu) { if (!per_cpu(cpu_hw_events, cpu).ds) return; @@ -1337,6 +1342,39 @@ undo: #include "perf_event_p6.c" #include "perf_event_intel.c" +static int __cpuinit +x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + if (x86_pmu.cpu_prepare) + x86_pmu.cpu_prepare(cpu); + break; + + case CPU_STARTING: + if (x86_pmu.cpu_starting) + x86_pmu.cpu_starting(cpu); + break; + + case CPU_DYING: + if (x86_pmu.cpu_dying) + x86_pmu.cpu_dying(cpu); + break; + + case CPU_DEAD: + if (x86_pmu.cpu_dead) + x86_pmu.cpu_dead(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + static void __init pmu_check_apic(void) { if (cpu_has_apic) @@ -1415,6 +1453,8 @@ void __init init_hw_perf_events(void) pr_info("... max period: %016Lx\n", x86_pmu.max_period); pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); pr_info("... event mask: %016Lx\n", perf_event_mask); + + perf_cpu_notifier(x86_pmu_notifier); } static inline void x86_pmu_read(struct perf_event *event) @@ -1674,29 +1714,3 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } - -void hw_perf_event_setup_online(int cpu) -{ - init_debug_store_on_cpu(cpu); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - amd_pmu_cpu_online(cpu); - break; - default: - return; - } -} - -void hw_perf_event_setup_offline(int cpu) -{ - init_debug_store_on_cpu(cpu); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - amd_pmu_cpu_offline(cpu); - break; - default: - return; - } -} diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 8f3dbfd..014528b 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -271,28 +271,6 @@ done: return &emptyconstraint; } -static __initconst struct x86_pmu amd_pmu = { - .name = "AMD", - .handle_irq = x86_pmu_handle_irq, - .disable_all = x86_pmu_disable_all, - .enable_all = x86_pmu_enable_all, - .enable = x86_pmu_enable_event, - .disable = x86_pmu_disable_event, - .eventsel = MSR_K7_EVNTSEL0, - .perfctr = MSR_K7_PERFCTR0, - .event_map = amd_pmu_event_map, - .raw_event = amd_pmu_raw_event, - .max_events = ARRAY_SIZE(amd_perfmon_event_map), - .num_events = 4, - .event_bits = 48, - .event_mask = (1ULL << 48) - 1, - .apic = 1, - /* use highest bit to detect overflow */ - .max_period = (1ULL << 47) - 1, - .get_event_constraints = amd_get_event_constraints, - .put_event_constraints = amd_put_event_constraints -}; - static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) { struct amd_nb *nb; @@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu) raw_spin_unlock(&amd_nb_lock); } +static __initconst struct x86_pmu amd_pmu = { + .name = "AMD", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, + .eventsel = MSR_K7_EVNTSEL0, + .perfctr = MSR_K7_PERFCTR0, + .event_map = amd_pmu_event_map, + .raw_event = amd_pmu_raw_event, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_events = 4, + .event_bits = 48, + .event_mask = (1ULL << 48) - 1, + .apic = 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, + .get_event_constraints = 
amd_get_event_constraints, + .put_event_constraints = amd_put_event_constraints, + + .cpu_prepare = amd_pmu_cpu_online, + .cpu_dead = amd_pmu_cpu_offline, +}; + static __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ @@ -390,11 +393,6 @@ static __init int amd_pmu_init(void) memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - /* - * explicitly initialize the boot cpu, other cpus will get - * the cpu hotplug callbacks from smp_init() - */ - amd_pmu_cpu_online(smp_processor_id()); return 0; } @@ -405,12 +403,4 @@ static int amd_pmu_init(void) return 0; } -static void amd_pmu_cpu_online(int cpu) -{ -} - -static void amd_pmu_cpu_offline(int cpu) -{ -} - #endif diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 44b60c8..12e811a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -870,7 +870,10 @@ static __initconst struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, - .get_event_constraints = intel_get_event_constraints + .get_event_constraints = intel_get_event_constraints, + + .cpu_starting = init_debug_store_on_cpu, + .cpu_dying = fini_debug_store_on_cpu, }; static __init int intel_pmu_init(void) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6f8cd7d..80acbf3 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -936,5 +936,21 @@ static inline void perf_event_disable(struct perf_event *event) { } #define perf_output_put(handle, x) \ perf_output_copy((handle), &(x), sizeof(x)) +/* + * This has to have a higher priority than migration_notifier in sched.c. + */ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = 20 }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ +} while (0) + #endif /* __KERNEL__ */ #endif /* _LINUX_PERF_EVENT_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4393b9e..73329de 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } -void __weak hw_perf_event_setup(int cpu) { barrier(); } -void __weak hw_perf_event_setup_online(int cpu) { barrier(); } -void __weak hw_perf_event_setup_offline(int cpu) { barrier(); } - int __weak hw_perf_group_sched_in(struct perf_event *group_leader, struct perf_cpu_context *cpuctx, @@ -5382,8 +5378,6 @@ static void __cpuinit perf_event_init_cpu(int cpu) spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; spin_unlock(&perf_resource_lock); - - hw_perf_event_setup(cpu); } #ifdef CONFIG_HOTPLUG_CPU @@ -5423,20 +5417,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) perf_event_init_cpu(cpu); break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hw_perf_event_setup_online(cpu); - break; - case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: perf_event_exit_cpu(cpu); break; - case CPU_DEAD: - 
hw_perf_event_setup_offline(cpu); - break; - default: break; } -- cgit v0.10.2 From 32975a4f114be52286f9a5bf6c230dbb8c0e1903 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 19:49:19 +0100 Subject: perf: Optimize perf_disable Currently we always call hw_perf_disable(), even if it's already disabled; this seems superfluous, especially since it cannot be made NMI safe (see further patches). Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 73329de..d810846 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -93,25 +93,15 @@ void __weak perf_event_print_debug(void) { } static DEFINE_PER_CPU(int, perf_disable_count); -void __perf_disable(void) -{ - __get_cpu_var(perf_disable_count)++; -} - -bool __perf_enable(void) -{ - return !--__get_cpu_var(perf_disable_count); -} - void perf_disable(void) { - __perf_disable(); - hw_perf_disable(); + if (!__get_cpu_var(perf_disable_count)++) + hw_perf_disable(); } void perf_enable(void) { - if (__perf_enable()) + if (!--__get_cpu_var(perf_disable_count)) hw_perf_enable(); } -- cgit v0.10.2 From 3fb2b8ddcc6a7aa62af6bd2cb939edfd4c460506 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 13:51:01 +0100 Subject: perf, x86: Do not use perf_disable from NMI context Explicitly use intel_pmu_{disable,enable}_all() in intel_pmu_handle_irq() to avoid the NMI race conditions in perf_{disable,enable}. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 12e811a..c582449 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -745,11 +745,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); - perf_disable(); + intel_pmu_disable_all(); intel_pmu_drain_bts_buffer(); status = intel_pmu_get_status(); if (!status) { - perf_enable(); + intel_pmu_enable_all(); return 0; } @@ -759,8 +759,7 @@ again: WARN_ONCE(1, "perfevents: irq loop stuck!\n"); perf_event_print_debug(); intel_pmu_reset(); - perf_enable(); - return 1; + goto done; } inc_irq_stat(apic_perf_irqs); @@ -790,8 +789,8 @@ again: if (status) goto again; - perf_enable(); - +done: + intel_pmu_enable_all(); return 1; } -- cgit v0.10.2 From 07088edb88164c2a2406cd2d9a7be19d8515214b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 20:16:01 +0100 Subject: perf, x86: Remove superfluous arguments to x86_perf_event_set_period() The second and third argument to x86_perf_event_set_period() are superfluous since they are simple expressions of the first argument. Hence remove them. 
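To make the refcounted disable in the "perf: Optimize perf_disable" change above concrete: the per-CPU perf_disable_count turns perf_disable()/perf_enable() into a nesting count, so the PMU hardware is only touched on the 0->1 and 1->0 transitions. A minimal self-contained sketch of that pattern (illustrative userspace code with a plain counter, not the kernel's per-CPU implementation):

#include <stdio.h>

static int disable_count;	/* per-CPU in the kernel; a single counter suffices here */

static void hw_disable(void) { puts("hw: disable"); }
static void hw_enable(void)  { puts("hw: enable"); }

static void my_perf_disable(void)
{
	if (!disable_count++)	/* only the outermost disable touches the hardware */
		hw_disable();
}

static void my_perf_enable(void)
{
	if (!--disable_count)	/* only the outermost enable re-enables it */
		hw_enable();
}

int main(void)
{
	my_perf_disable();	/* hw: disable */
	my_perf_disable();	/* nested: no hardware access */
	my_perf_enable();	/* nested: no hardware access */
	my_perf_enable();	/* hw: enable */
	return 0;
}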
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.006500906@chello.nl> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 585d560..fcf1788 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -170,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; -static int x86_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx); +static int x86_perf_event_set_period(struct perf_event *event); /* * Generalized hw caching related hw_event table, filled @@ -835,7 +834,7 @@ void hw_perf_enable(void) if (hwc->idx == -1) { x86_assign_hw_event(event, cpuc, i); - x86_perf_event_set_period(event, hwc, hwc->idx); + x86_perf_event_set_period(event); } /* * need to mark as active because x86_pmu_disable() @@ -876,12 +875,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); * To be called with the event disabled in hw: */ static int -x86_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx) +x86_perf_event_set_period(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; - int err, ret = 0; + int err, ret = 0, idx = hwc->idx; if (idx == X86_PMC_IDX_FIXED_BTS) return 0; @@ -979,7 +978,7 @@ static int x86_pmu_start(struct perf_event *event) if (hwc->idx == -1) return -EAGAIN; - x86_perf_event_set_period(event, hwc, hwc->idx); + x86_perf_event_set_period(event); x86_pmu.enable(hwc, hwc->idx); return 0; @@ -1123,7 +1122,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) handled = 1; data.period = event->hw.last_period; - if (!x86_perf_event_set_period(event, hwc, idx)) + if (!x86_perf_event_set_period(event)) continue; if (perf_event_overflow(event, 1, &data, regs)) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index c582449..6dbdf91 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -699,7 +699,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event) int ret; x86_perf_event_update(event, hwc, idx); - ret = x86_perf_event_set_period(event, hwc, idx); + ret = x86_perf_event_set_period(event); return ret; } -- cgit v0.10.2 From cc2ad4ba8792b9d4ff893ae3b845d2c5a6206fc9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 20:18:39 +0100 Subject: perf, x86: Remove superfluous arguments to x86_perf_event_update() The second and third argument to x86_perf_event_update() are superfluous since they are simple expressions of the first argument. Hence remove them. Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.089468871@chello.nl> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fcf1788..086127b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -193,11 +193,12 @@ static u64 __read_mostly hw_cache_event_ids * Returns the delta events processed. 
*/ static u64 -x86_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) +x86_perf_event_update(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; int shift = 64 - x86_pmu.event_bits; u64 prev_raw_count, new_raw_count; + int idx = hwc->idx; s64 delta; if (idx == X86_PMC_IDX_FIXED_BTS) @@ -1064,7 +1065,7 @@ static void x86_pmu_stop(struct perf_event *event) * Drain the remaining delta count out of a event * that we are disabling: */ - x86_perf_event_update(event, hwc, idx); + x86_perf_event_update(event); cpuc->events[idx] = NULL; } @@ -1112,7 +1113,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) event = cpuc->events[idx]; hwc = &event->hw; - val = x86_perf_event_update(event, hwc, idx); + val = x86_perf_event_update(event); if (val & (1ULL << (x86_pmu.event_bits - 1))) continue; @@ -1458,7 +1459,7 @@ void __init init_hw_perf_events(void) static inline void x86_pmu_read(struct perf_event *event) { - x86_perf_event_update(event, &event->hw, event->hw.idx); + x86_perf_event_update(event); } static const struct pmu pmu = { diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 6dbdf91..a4c9f16 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -694,14 +694,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) */ static int intel_pmu_save_and_restart(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - int ret; - - x86_perf_event_update(event, hwc, idx); - ret = x86_perf_event_set_period(event); - - return ret; + x86_perf_event_update(event); + return x86_perf_event_set_period(event); } static void intel_pmu_reset(void) -- cgit v0.10.2 From aff3d91a913c9ae0c2f56b65b27cbd00c7d27ee3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 20:32:08 +0100 Subject: perf, x86: Change x86_pmu.{enable,disable} calling convention Pass the full perf_event into the x86_pmu functions so that those may make use of more than the hw_perf_event, and while doing this, remove the superfluous second argument. 
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.165166129@chello.nl> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 086127b..2dd704f 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -133,8 +133,8 @@ struct x86_pmu { int (*handle_irq)(struct pt_regs *); void (*disable_all)(void); void (*enable_all)(void); - void (*enable)(struct hw_perf_event *, int); - void (*disable)(struct hw_perf_event *, int); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); unsigned eventsel; unsigned perfctr; u64 (*event_map)(int); @@ -845,7 +845,7 @@ void hw_perf_enable(void) set_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = event; - x86_pmu.enable(hwc, hwc->idx); + x86_pmu.enable(event); perf_event_update_userpage(event); } cpuc->n_added = 0; @@ -858,15 +858,16 @@ void hw_perf_enable(void) x86_pmu.enable_all(); } -static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc) { - (void)checking_wrmsrl(hwc->config_base + idx, + (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); } -static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) +static inline void x86_pmu_disable_event(struct perf_event *event) { - (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); + struct hw_perf_event *hwc = &event->hw; + (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); @@ -927,11 +928,11 @@ x86_perf_event_set_period(struct perf_event *event) return ret; } -static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void x86_pmu_enable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->enabled) - __x86_pmu_enable_event(hwc, idx); + __x86_pmu_enable_event(&event->hw); } /* @@ -974,13 +975,11 @@ static int x86_pmu_enable(struct perf_event *event) static int x86_pmu_start(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx == -1) + if (event->hw.idx == -1) return -EAGAIN; x86_perf_event_set_period(event); - x86_pmu.enable(hwc, hwc->idx); + x86_pmu.enable(event); return 0; } @@ -994,7 +993,7 @@ static void x86_pmu_unthrottle(struct perf_event *event) cpuc->events[hwc->idx] != event)) return; - x86_pmu.enable(hwc, hwc->idx); + x86_pmu.enable(event); } void perf_event_print_debug(void) @@ -1059,7 +1058,7 @@ static void x86_pmu_stop(struct perf_event *event) * could reenable again: */ clear_bit(idx, cpuc->active_mask); - x86_pmu.disable(hwc, idx); + x86_pmu.disable(event); /* * Drain the remaining delta count out of a event @@ -1127,7 +1126,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) continue; if (perf_event_overflow(event, 1, &data, regs)) - x86_pmu.disable(hwc, idx); + x86_pmu.disable(event); } if (handled) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a4c9f16..a840948 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack) } static inline void -intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) +intel_pmu_disable_fixed(struct hw_perf_event 
*hwc) { - int idx = __idx - X86_PMC_IDX_FIXED; + int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; mask = 0xfULL << (idx * 4); @@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void) } static inline void -intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) +intel_pmu_disable_event(struct perf_event *event) { - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { + struct hw_perf_event *hwc = &event->hw; + + if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { intel_pmu_disable_bts(); intel_pmu_drain_bts_buffer(); return; } if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_disable_fixed(hwc, idx); + intel_pmu_disable_fixed(hwc); return; } - x86_pmu_disable_event(hwc, idx); + x86_pmu_disable_event(event); } static inline void -intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) +intel_pmu_enable_fixed(struct hw_perf_event *hwc) { - int idx = __idx - X86_PMC_IDX_FIXED; + int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; int err; @@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) err = checking_wrmsrl(hwc->config_base, ctrl_val); } -static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void intel_pmu_enable_event(struct perf_event *event) { - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { + struct hw_perf_event *hwc = &event->hw; + + if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { if (!__get_cpu_var(cpu_hw_events).enabled) return; @@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) } if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_enable_fixed(hwc, idx); + intel_pmu_enable_fixed(hwc); return; } - __x86_pmu_enable_event(hwc, idx); + __x86_pmu_enable_event(hwc); } /* @@ -771,7 +775,7 @@ again: data.period = event->hw.last_period; if (perf_event_overflow(event, 1, &data, regs)) - intel_pmu_disable_event(&event->hw, bit); + intel_pmu_disable_event(event); } intel_pmu_ack_status(ack); diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index a4e67b9..a330485 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void) } static inline void -p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) +p6_pmu_disable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; u64 val = P6_NOP_EVENT; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)checking_wrmsrl(hwc->config_base + idx, val); + (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); } -static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void p6_pmu_enable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; u64 val; val = hwc->config; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)checking_wrmsrl(hwc->config_base + idx, val); + (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); } static __initconst struct x86_pmu p6_pmu = { -- cgit v0.10.2 From 34538ee77b39a12702e0f4c3ed9e8fa2dd5eb92c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 21:16:55 +0100 Subject: perf, x86: Use unlocked bitops There is no concurrency on these variables, so don't use LOCK'ed ops. As to the intel_pmu_handle_irq() status bit clean, nobody uses that so remove it all together. 
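On the LOCK'ed vs. unlocked bitops in the change above: set_bit()/clear_bit() are atomic read-modify-writes (LOCK-prefixed on x86 SMP builds) and are only needed when another CPU can update the same word concurrently, while __set_bit()/__clear_bit() compile to plain read-modify-writes. As the changelog notes, there is no concurrency on the masks touched here, hence the cheaper variants. A rough userspace analogue of the distinction, a sketch using C11 atomics rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long shared_mask;	/* may be updated concurrently */
static unsigned long local_mask;		/* only ever touched by one context */

static void mark(int bit)
{
	/* atomic RMW: what set_bit() provides, needed only under real concurrency */
	atomic_fetch_or(&shared_mask, 1UL << bit);

	/* plain RMW: what __set_bit() provides, fine for CPU-local state */
	local_mask |= 1UL << bit;
}

int main(void)
{
	mark(3);
	printf("%lx %lx\n", (unsigned long)shared_mask, local_mask);
	return 0;
}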
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.240023029@chello.nl> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2dd704f..01b1667 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (test_bit(hwc->idx, used_mask)) break; - set_bit(hwc->idx, used_mask); + __set_bit(hwc->idx, used_mask); if (assign) assign[i] = hwc->idx; } @@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (j == X86_PMC_IDX_MAX) break; - set_bit(j, used_mask); + __set_bit(j, used_mask); if (assign) assign[i] = j; @@ -842,7 +842,7 @@ void hw_perf_enable(void) * clear active_mask and events[] yet it preserves * idx */ - set_bit(hwc->idx, cpuc->active_mask); + __set_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = event; x86_pmu.enable(event); @@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event) * Must be done before we disable, otherwise the nmi handler * could reenable again: */ - clear_bit(idx, cpuc->active_mask); + __clear_bit(idx, cpuc->active_mask); x86_pmu.disable(event); /* diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 014528b..573458f 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -287,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) * initialize all possible NB constraints */ for (i = 0; i < x86_pmu.num_events; i++) { - set_bit(i, nb->event_constraints[i].idxmsk); + __set_bit(i, nb->event_constraints[i].idxmsk); nb->event_constraints[i].weight = 1; } return nb; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a840948..d87421c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -765,7 +765,6 @@ again: for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; - clear_bit(bit, (unsigned long *) &status); if (!test_bit(bit, cpuc->active_mask)) continue; -- cgit v0.10.2 From c08053e627d23490a03431285b78b7a5b617fbad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:19:24 +0100 Subject: perf, x86: Fix x86_pmu_start pmu::start should undo pmu::stop, make it so. 
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 01b1667..9757b96 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -785,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, hwc->last_tag == cpuc->tags[i]; } +static int x86_pmu_start(struct perf_event *event); static void x86_pmu_stop(struct perf_event *event); void hw_perf_enable(void) @@ -833,20 +834,10 @@ void hw_perf_enable(void) event = cpuc->event_list[i]; hwc = &event->hw; - if (hwc->idx == -1) { + if (hwc->idx == -1) x86_assign_hw_event(event, cpuc, i); - x86_perf_event_set_period(event); - } - /* - * need to mark as active because x86_pmu_disable() - * clear active_mask and events[] yet it preserves - * idx - */ - __set_bit(hwc->idx, cpuc->active_mask); - cpuc->events[hwc->idx] = event; - x86_pmu.enable(event); - perf_event_update_userpage(event); + x86_pmu_start(event); } cpuc->n_added = 0; perf_events_lapic_init(); @@ -975,11 +966,17 @@ static int x86_pmu_enable(struct perf_event *event) static int x86_pmu_start(struct perf_event *event) { - if (event->hw.idx == -1) + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = event->hw.idx; + + if (idx == -1) return -EAGAIN; x86_perf_event_set_period(event); + cpuc->events[idx] = event; + __set_bit(idx, cpuc->active_mask); x86_pmu.enable(event); + perf_event_update_userpage(event); return 0; } -- cgit v0.10.2 From 71e2d2828046133ed985696a02e2e1499ca0bfb8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 17:51:33 +0100 Subject: perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE) Calling ioctl(PERF_EVENT_IOC_DISABLE) on a thottled counter would result in a double disable, cure this by using x86_pmu_{start,stop} for throttle/unthrottle and teach x86_pmu_stop() to check ->active_mask. 
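The ->active_mask check referred to above is what makes the double disable harmless: x86_pmu_stop() now bails out unless it actually clears the counter's bit, so a second stop (throttling followed by PERF_EVENT_IOC_DISABLE) becomes a no-op. A small self-contained sketch of that idempotent-stop pattern (illustrative only, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool active;	/* stands in for the event's bit in cpuc->active_mask */

static void counter_stop(void)
{
	if (!active)	/* already stopped: the second disable is a no-op */
		return;

	active = false;
	puts("counter: disabled in hardware");
}

int main(void)
{
	active = true;
	counter_stop();	/* throttling stops the counter */
	counter_stop();	/* a later ioctl(PERF_EVENT_IOC_DISABLE): harmless */
	return 0;
}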
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9757b96..b68c4fb 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event) static void x86_pmu_unthrottle(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - - if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || - cpuc->events[hwc->idx] != event)) - return; - - x86_pmu.enable(event); + int ret = x86_pmu_start(event); + WARN_ON_ONCE(ret); } void perf_event_print_debug(void) @@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - /* - * Must be done before we disable, otherwise the nmi handler - * could reenable again: - */ - __clear_bit(idx, cpuc->active_mask); + if (!__test_and_clear_bit(idx, cpuc->active_mask)) + return; + x86_pmu.disable(event); /* @@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) continue; if (perf_event_overflow(event, 1, &data, regs)) - x86_pmu.disable(event); + x86_pmu_stop(event); } if (handled) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index d87421c..84bfde6 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -774,7 +774,7 @@ again: data.period = event->hw.last_period; if (perf_event_overflow(event, 1, &data, regs)) - intel_pmu_disable_event(event); + x86_pmu_stop(event); } intel_pmu_ack_status(ack); -- cgit v0.10.2 From 356e1f2e0ace2d4b100c8eda9d49b709e8323da5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:49:56 +0100 Subject: perf, x86: Properly account n_added Make sure n_added is properly accounted so that we can rely on the value to reflect the number of added counters. This is needed if its going to be used for more than a boolean check. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b68c4fb..071c840 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -959,7 +959,7 @@ static int x86_pmu_enable(struct perf_event *event) memcpy(cpuc->assign, assign, n*sizeof(int)); cpuc->n_events = n; - cpuc->n_added = n - n0; + cpuc->n_added += n - n0; return 0; } @@ -1302,7 +1302,7 @@ int hw_perf_group_sched_in(struct perf_event *leader, memcpy(cpuc->assign, assign, n0*sizeof(int)); cpuc->n_events = n0; - cpuc->n_added = n1; + cpuc->n_added += n1; ctx->nr_active += n1; /* -- cgit v0.10.2 From 19925ce778f9fc371b9607625de3bff04c60121e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:20:40 +0100 Subject: perf, x86: Fix double disable calls hw_perf_enable() would disable events that were not yet enabled. This causes problems with code that assumes that ->enable/->disable calls are balanced (like the LBR code does). What happens is that we disable newly added counters that match their previous assignment, even though they are not yet programmed on the hardware. 
Avoid this by only doing the first pass over the existing events. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 071c840..045cc0b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -802,6 +802,7 @@ void hw_perf_enable(void) return; if (cpuc->n_added) { + int n_running = cpuc->n_events - cpuc->n_added; /* * apply assignment obtained either from * hw_perf_group_sched_in() or x86_pmu_enable() @@ -809,7 +810,7 @@ void hw_perf_enable(void) * step1: save events moving to new counters * step2: reprogram moved events into new counters */ - for (i = 0; i < cpuc->n_events; i++) { + for (i = 0; i < n_running; i++) { event = cpuc->event_list[i]; hwc = &event->hw; -- cgit v0.10.2 From f3d46b2e6fa57547f9884330798792afc83f4b04 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:24:58 +0100 Subject: perf, x86: Fix double enable calls hw_perf_enable() would enable already enabled events. This causes problems with code that assumes that ->enable/->disable calls are balanced (like the LBR code does). What happens is that events that were already running and left in place would get enabled again. Avoid this by only enabling new events that match their previous assignment. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 045cc0b..1d665a0b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -835,6 +835,10 @@ void hw_perf_enable(void) event = cpuc->event_list[i]; hwc = &event->hw; + if (i < n_running && + match_prev_assignment(hwc, cpuc, i)) + continue; + if (hwc->idx == -1) x86_assign_hw_event(event, cpuc, i); -- cgit v0.10.2 From d4944a06666054707d23e11888e480af239e5abf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 13:51:20 +0100 Subject: perf: Provide better condition for event rotation Try to avoid useless rotation and PMU disables. 
[ Could be improved by keeping a nr_runnable count to better account for the < PERF_STAT_INACTIVE counters ] Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/kernel/perf_event.c b/kernel/perf_event.c index d810846..52c69a3 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(event, 1); + perf_disable(); event->pmu->unthrottle(event); + perf_enable(); } if (!event->attr.freq || !event->attr.sample_freq) continue; + perf_disable(); event->pmu->read(event); now = atomic64_read(&event->count); delta = now - hwc->freq_count_stamp; @@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) if (delta > 0) perf_adjust_period(event, TICK_NSEC, delta); + perf_enable(); } raw_spin_unlock(&ctx->lock); } @@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ static void rotate_ctx(struct perf_event_context *ctx) { - if (!ctx->nr_events) - return; - raw_spin_lock(&ctx->lock); /* Rotate the first entry last of non-pinned groups */ @@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr) { struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; + int rotate = 0; if (!atomic_read(&nr_events)) return; cpuctx = &__get_cpu_var(perf_cpu_context); - ctx = curr->perf_event_ctxp; + if (cpuctx->ctx.nr_events && + cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) + rotate = 1; - perf_disable(); + ctx = curr->perf_event_ctxp; + if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active) + rotate = 1; perf_ctx_adjust_freq(&cpuctx->ctx); if (ctx) perf_ctx_adjust_freq(ctx); + if (!rotate) + return; + + perf_disable(); cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_out(ctx, EVENT_FLEXIBLE); @@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_in(curr, EVENT_FLEXIBLE); - perf_enable(); } -- cgit v0.10.2 From 1224550969e0bf18785786a1a9f801cd86d68586 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 5 Mar 2010 11:54:02 -0300 Subject: perf tools: Don't throw away old map slices not overlapped by new maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: David S. Miller Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267800842-22324-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 21b9216..9024fa1 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -183,8 +183,8 @@ struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) return th; } -static void map_groups__remove_overlappings(struct map_groups *self, - struct map *map) +static int map_groups__fixup_overlappings(struct map_groups *self, + struct map *map) { struct rb_root *root = &self->maps[map->type]; struct rb_node *next = rb_first(root); @@ -209,7 +209,36 @@ static void map_groups__remove_overlappings(struct map_groups *self, * list. 
*/ list_add_tail(&pos->node, &self->removed_maps[map->type]); + /* + * Now check if we need to create new maps for areas not + * overlapped by the new map: + */ + if (map->start > pos->start) { + struct map *before = map__clone(pos); + + if (before == NULL) + return -ENOMEM; + + before->end = map->start - 1; + map_groups__insert(self, before); + if (verbose >= 2) + map__fprintf(before, stderr); + } + + if (map->end < pos->end) { + struct map *after = map__clone(pos); + + if (after == NULL) + return -ENOMEM; + + after->start = map->end + 1; + map_groups__insert(self, after); + if (verbose >= 2) + map__fprintf(after, stderr); + } } + + return 0; } void maps__insert(struct rb_root *maps, struct map *map) @@ -254,7 +283,7 @@ struct map *maps__find(struct rb_root *maps, u64 ip) void thread__insert_map(struct thread *self, struct map *map) { - map_groups__remove_overlappings(&self->mg, map); + map_groups__fixup_overlappings(&self->mg, map); map_groups__insert(&self->mg, map); } -- cgit v0.10.2 From accd3cc45a0e1d11090ea66888405987de77bdca Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 5 Mar 2010 12:51:04 -0300 Subject: perf probe: Add missing variable initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cc1: warnings being treated as errors util/probe-finder.c: In function 'find_line_range': util/probe-finder.c:172: warning: 'src' may be used uninitialized in this function make: *** [util/probe-finder.o] Error 1 Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Masami Hiramatsu Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index e77dc88..1e6c65e 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -169,7 +169,7 @@ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname) { Dwarf_Files *files; size_t nfiles, i; - const char *src; + const char *src = NULL; int ret; if (!fname) -- cgit v0.10.2 From 8907fd607b66e36636469a2de9833db643869db8 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:05 -0300 Subject: perf record: Add ID and to recorded event data when recording multiple events Currently perf record does not write the ID or the to disk for events. This doesn't allow report to tell if an event stream contains one or more types of events. This patch adds this entry to the list of data that record will write to disk if more than one event was requested. 
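The record change above only adds PERF_SAMPLE_ID to attr->sample_type when more than one counter is being recorded, so each sample carries the id of the event stream it belongs to and report can tell the streams apart. A hedged sketch of the same idea as a standalone helper (the helper name is invented; struct perf_event_attr and the PERF_SAMPLE_* flags are the real perf_event_open() UAPI):

#include <linux/perf_event.h>
#include <string.h>

/* Hypothetical helper, not part of the perf tool: build an attr for recording. */
static void init_record_attr(struct perf_event_attr *attr, int nr_counters)
{
	memset(attr, 0, sizeof(*attr));
	attr->size        = sizeof(*attr);
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/* With several events in one file, stamp each sample with its stream ID */
	if (nr_counters > 1)
		attr->sample_type |= PERF_SAMPLE_ID;
}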
Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 771533c..f573bbb 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -244,6 +244,9 @@ static void create_counter(int counter, int cpu, pid_t pid) attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; + if (nr_counters > 1) + attr->sample_type |= PERF_SAMPLE_ID; + if (freq) { attr->sample_type |= PERF_SAMPLE_PERIOD; attr->freq = 1; -- cgit v0.10.2 From d403d0acc9c5afa679a3f61e71489530d7fa0606 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:06 -0300 Subject: perf session: Change add_hist_entry to take the tree root instead of session In order to minimize the impact of storing multiple events in a report this function will now take the root of the histogram tree so that the logic for selecting the proper tree can be inserted before the call. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 5ec5de9..4b734c7 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -116,7 +116,7 @@ static int perf_session__add_hist_entry(struct perf_session *self, return 0; } - he = __perf_session__add_hist_entry(self, al, NULL, count, &hit); + he = __perf_session__add_hist_entry(&self->hists, al, NULL, count, &hit); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 18b3f50..20df735 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -26,7 +26,8 @@ static int perf_session__add_hist_entry(struct perf_session *self, struct addr_location *al, u64 count) { bool hit; - struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, + struct hist_entry *he = __perf_session__add_hist_entry(&self->hists, + al, NULL, count, &hit); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cfc655d..cd16e6a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -56,7 +56,8 @@ static int perf_session__add_hist_entry(struct perf_session *self, if ((sort__has_parent || symbol_conf.use_callchain) && chain) syms = perf_session__resolve_callchain(self, al->thread, chain, &parent); - he = __perf_session__add_hist_entry(self, al, parent, count, &hit); + he = __perf_session__add_hist_entry(&self->hists, al, parent, + count, &hit); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index e8daf5c..55dd911 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -12,12 +12,12 @@ struct callchain_param callchain_param = { * histogram, sorted on item, collects counts */ -struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, +struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, struct addr_location *al, struct symbol *sym_parent, u64 count, bool *hit) { - struct rb_node **p = &self->hists.rb_node; + struct rb_node **p = &hists->rb_node; struct rb_node *parent = NULL; struct hist_entry *he; struct hist_entry entry = { @@ -53,7 +53,7 @@ struct hist_entry *__perf_session__add_hist_entry(struct perf_session 
*self, return NULL; *he = entry; rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &self->hists); + rb_insert_color(&he->rb_node, hists); *hit = false; return he; } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index e5f99b2..7b48590 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -10,8 +10,9 @@ struct perf_session; struct hist_entry; struct addr_location; struct symbol; +struct rb_root; -struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, +struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, struct addr_location *al, struct symbol *parent, u64 count, bool *hit); -- cgit v0.10.2 From cb8f09393646c5058056db771583c86e0ed1d92f Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:07 -0300 Subject: perf session: Add storage for separating event types in report This patch adds the structures necessary to count each event type independently in perf report. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 50a7132..a33b949 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -99,6 +99,15 @@ struct events_stats { u64 lost; }; +struct event_stat_id { + struct rb_node rb_node; + struct rb_root hists; + struct events_stats stats; + u64 config; + u64 event_stream; + u32 type; +}; + void event__print_totals(void); struct perf_session; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 0de7258..eed1cb8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -70,6 +70,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc memcpy(self->filename, filename, len); self->threads = RB_ROOT; + self->stats_by_id = RB_ROOT; self->last_match = NULL; self->mmap_window = 32; self->cwd = NULL; diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 31950fc..5c33417 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -20,6 +20,7 @@ struct perf_session { struct thread *last_match; struct map *vmlinux_maps[MAP__NR_TYPES]; struct events_stats events_stats; + struct rb_root stats_by_id; unsigned long event_total[PERF_RECORD_MAX]; unsigned long unknown_events; struct rb_root hists; -- cgit v0.10.2 From eefc465cdd49cb89a742083fac2807c718ddad31 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:08 -0300 Subject: perf session: Change perf_session post processing functions to take histogram tree Now that report can store histograms for multiple events we need to be able to do the post processing work for each histogram. This patch changes the post processing functions so that they can be called individually for each event's histogram. 
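With collapse/output resort now taking a plain rb_root, a caller can keep one histogram tree per event and post-process each tree on its own, which is what the multiple-event report support later in this series does. A rough sketch of that shape against the signatures introduced above (the struct and helper names here are invented; it assumes the perf tool's util/hist.h for the resort functions and their types):

#include "util/hist.h"	/* perf_session__{collapse,output}_resort() after this series */

/* Invented container: one histogram tree per recorded event. */
struct event_hists {
	unsigned long long	config;	/* which event the tree belongs to */
	unsigned long long	total;	/* samples attributed to this event */
	struct rb_root		*hists;	/* per-event histogram tree */
};

/* Post-process every event's histogram independently. */
static void resort_all(struct event_hists *evs, int nr_events)
{
	int i;

	for (i = 0; i < nr_events; i++) {
		perf_session__collapse_resort(evs[i].hists);
		perf_session__output_resort(evs[i].hists, evs[i].total);
	}
}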
Signed-off-by: Eric B Munson [ Guarantee bisectabilty by fixing up builtin-report.c ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 4b734c7..6ad7148 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -564,8 +564,8 @@ static int __cmd_annotate(void) if (verbose > 2) dsos__fprintf(stdout); - perf_session__collapse_resort(session); - perf_session__output_resort(session, session->event_total[0]); + perf_session__collapse_resort(&session->hists); + perf_session__output_resort(&session->hists, session->event_total[0]); perf_session__find_annotations(session); out_delete: perf_session__delete(session); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 20df735..1ea15d8 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -115,7 +115,7 @@ static void perf_session__resort_hist_entries(struct perf_session *self) static void perf_session__set_hist_entries_positions(struct perf_session *self) { - perf_session__output_resort(self, self->events_stats.total); + perf_session__output_resort(&self->hists, self->events_stats.total); perf_session__resort_hist_entries(self); } @@ -167,13 +167,15 @@ static int __cmd_diff(void) goto out_delete; } - perf_session__output_resort(session[1], session[1]->events_stats.total); + perf_session__output_resort(&session[1]->hists, + session[1]->events_stats.total); if (show_displacement) perf_session__set_hist_entries_positions(session[0]); perf_session__match_hists(session[0], session[1]); - perf_session__fprintf_hists(session[1], session[0], - show_displacement, stdout); + perf_session__fprintf_hists(&session[1]->hists, session[0], + show_displacement, stdout, + session[1]->events_stats.total); out_delete: for (i = 0; i < 2; ++i) perf_session__delete(session[i]); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cd16e6a..294b4cf 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -225,10 +225,12 @@ static int __cmd_report(void) if (verbose > 2) dsos__fprintf(stdout); - perf_session__collapse_resort(session); - perf_session__output_resort(session, session->events_stats.total); + perf_session__collapse_resort(&session->hists); + perf_session__output_resort(&session->hists, + session->events_stats.total); fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total); - perf_session__fprintf_hists(session, NULL, false, stdout); + perf_session__fprintf_hists(&session->hists, NULL, false, stdout, + session->events_stats.total); if (sort_order == default_sort_order && parent_pattern == default_parent_pattern) fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 55dd911..73ebb6f 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -130,7 +130,7 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he) rb_insert_color(&he->rb_node, root); } -void perf_session__collapse_resort(struct perf_session *self) +void perf_session__collapse_resort(struct rb_root *hists) { struct rb_root tmp; struct rb_node *next; @@ -140,17 +140,17 @@ void perf_session__collapse_resort(struct perf_session *self) return; tmp = RB_ROOT; - next = rb_first(&self->hists); + next = rb_first(hists); while (next) { n = 
rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &self->hists); + rb_erase(&n->rb_node, hists); collapse__insert_entry(&tmp, n); } - self->hists = tmp; + *hists = tmp; } /* @@ -183,7 +183,7 @@ static void perf_session__insert_output_hist_entry(struct rb_root *root, rb_insert_color(&he->rb_node, root); } -void perf_session__output_resort(struct perf_session *self, u64 total_samples) +void perf_session__output_resort(struct rb_root *hists, u64 total_samples) { struct rb_root tmp; struct rb_node *next; @@ -194,18 +194,18 @@ void perf_session__output_resort(struct perf_session *self, u64 total_samples) total_samples * (callchain_param.min_percent / 100); tmp = RB_ROOT; - next = rb_first(&self->hists); + next = rb_first(hists); while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &self->hists); + rb_erase(&n->rb_node, hists); perf_session__insert_output_hist_entry(&tmp, n, min_callchain_hits); } - self->hists = tmp; + *hists = tmp; } static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) @@ -456,10 +456,10 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, } static size_t hist_entry__fprintf(struct hist_entry *self, - struct perf_session *session, struct perf_session *pair_session, bool show_displacement, - long displacement, FILE *fp) + long displacement, FILE *fp, + u64 session_total) { struct sort_entry *se; u64 count, total; @@ -474,7 +474,7 @@ static size_t hist_entry__fprintf(struct hist_entry *self, total = pair_session->events_stats.total; } else { count = self->count; - total = session->events_stats.total; + total = session_total; } if (total) @@ -496,8 +496,8 @@ static size_t hist_entry__fprintf(struct hist_entry *self, if (total > 0) old_percent = (count * 100.0) / total; - if (session->events_stats.total > 0) - new_percent = (self->count * 100.0) / session->events_stats.total; + if (session_total > 0) + new_percent = (self->count * 100.0) / session_total; diff = new_percent - old_percent; @@ -544,16 +544,17 @@ static size_t hist_entry__fprintf(struct hist_entry *self, left_margin -= thread__comm_len(self->thread); } - hist_entry_callchain__fprintf(fp, self, session->events_stats.total, + hist_entry_callchain__fprintf(fp, self, session_total, left_margin); } return ret; } -size_t perf_session__fprintf_hists(struct perf_session *self, +size_t perf_session__fprintf_hists(struct rb_root *hists, struct perf_session *pair, - bool show_displacement, FILE *fp) + bool show_displacement, FILE *fp, + u64 session_total) { struct sort_entry *se; struct rb_node *nd; @@ -641,7 +642,7 @@ size_t perf_session__fprintf_hists(struct perf_session *self, fprintf(fp, "\n#\n"); print_entries: - for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { + for (nd = rb_first(hists); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (show_displacement) { @@ -652,8 +653,8 @@ print_entries: displacement = 0; ++position; } - ret += hist_entry__fprintf(h, self, pair, show_displacement, - displacement, fp); + ret += hist_entry__fprintf(h, pair, show_displacement, + displacement, fp, session_total); } free(rem_sq_bracket); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 7b48590..16f360c 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -20,9 +20,10 @@ extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); extern int64_t hist_entry__collapse(struct hist_entry *, 
struct hist_entry *); void hist_entry__free(struct hist_entry *); -void perf_session__output_resort(struct perf_session *self, u64 total_samples); -void perf_session__collapse_resort(struct perf_session *self); -size_t perf_session__fprintf_hists(struct perf_session *self, +void perf_session__output_resort(struct rb_root *hists, u64 total_samples); +void perf_session__collapse_resort(struct rb_root *hists); +size_t perf_session__fprintf_hists(struct rb_root *hists, struct perf_session *pair, - bool show_displacement, FILE *fp); + bool show_displacement, FILE *fp, + u64 session_total); #endif /* __PERF_HIST_H */ -- cgit v0.10.2 From cbbc79a53278b83bf7f834127751459f9299e402 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:09 -0300 Subject: perf report: Add multiple event support Perf report does not handle multiple events being reported, even though perf record stores them properly on disk. This patch addresses that issue by adding the logic to perf report to use the event stream id that is saved by record and the new data structures to seperate the event streams and report them individually. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-6-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 294b4cf..f815de2 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -45,29 +45,71 @@ static char *pretty_printing_style = default_pretty_printing_style; static char callchain_default_opt[] = "fractal,0.5"; +static struct event_stat_id *get_stats(struct perf_session *self, + u64 event_stream, u32 type, u64 config) +{ + struct rb_node **p = &self->stats_by_id.rb_node; + struct rb_node *parent = NULL; + struct event_stat_id *iter, *new; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct event_stat_id, rb_node); + if (iter->config == config) + return iter; + + + if (config > iter->config) + p = &(*p)->rb_right; + else + p = &(*p)->rb_left; + } + + new = malloc(sizeof(struct event_stat_id)); + if (new == NULL) + return NULL; + memset(new, 0, sizeof(struct event_stat_id)); + new->event_stream = event_stream; + new->config = config; + new->type = type; + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, &self->stats_by_id); + return new; +} + static int perf_session__add_hist_entry(struct perf_session *self, struct addr_location *al, - struct ip_callchain *chain, u64 count) + struct sample_data *data) { struct symbol **syms = NULL, *parent = NULL; bool hit; struct hist_entry *he; + struct event_stat_id *stats; + struct perf_event_attr *attr; - if ((sort__has_parent || symbol_conf.use_callchain) && chain) + if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) syms = perf_session__resolve_callchain(self, al->thread, - chain, &parent); - he = __perf_session__add_hist_entry(&self->hists, al, parent, - count, &hit); + data->callchain, &parent); + + attr = perf_header__find_attr(data->id, &self->header); + if (attr) + stats = get_stats(self, data->id, attr->type, attr->config); + else + stats = get_stats(self, data->id, 0, 0); + if (stats == NULL) + return -ENOMEM; + he = __perf_session__add_hist_entry(&stats->hists, al, parent, + data->period, &hit); if (he == NULL) return -ENOMEM; if (hit) - he->count += count; + he->count += data->period; if (symbol_conf.use_callchain) { if (!hit) callchain_init(&he->callchain); - 
append_chain(&he->callchain, chain, syms); + append_chain(&he->callchain, data->callchain, syms); free(syms); } @@ -87,10 +129,30 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) return 0; } +static int add_event_total(struct perf_session *session, + struct sample_data *data, + struct perf_event_attr *attr) +{ + struct event_stat_id *stats; + + if (attr) + stats = get_stats(session, data->id, attr->type, attr->config); + else + stats = get_stats(session, data->id, 0, 0); + + if (!stats) + return -ENOMEM; + + stats->stats.total += data->period; + session->events_stats.total += data->period; + return 0; +} + static int process_sample_event(event_t *event, struct perf_session *session) { struct sample_data data = { .period = 1, }; struct addr_location al; + struct perf_event_attr *attr; event__parse_sample(event, session->sample_type, &data); @@ -124,12 +186,18 @@ static int process_sample_event(event_t *event, struct perf_session *session) if (al.filtered || (hide_unresolved && al.sym == NULL)) return 0; - if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { + if (perf_session__add_hist_entry(session, &al, &data)) { pr_debug("problem incrementing symbol count, skipping event\n"); return -1; } - session->events_stats.total += data.period; + attr = perf_header__find_attr(data.id, &session->header); + + if (add_event_total(session, &data, attr)) { + pr_debug("problem adding event count\n"); + return -1; + } + return 0; } @@ -198,6 +266,7 @@ static int __cmd_report(void) { int ret = -EINVAL; struct perf_session *session; + struct rb_node *next; session = perf_session__new(input_name, O_RDONLY, force); if (session == NULL) @@ -225,12 +294,28 @@ static int __cmd_report(void) if (verbose > 2) dsos__fprintf(stdout); - perf_session__collapse_resort(&session->hists); - perf_session__output_resort(&session->hists, - session->events_stats.total); - fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total); - perf_session__fprintf_hists(&session->hists, NULL, false, stdout, - session->events_stats.total); + next = rb_first(&session->stats_by_id); + while (next) { + struct event_stat_id *stats; + + stats = rb_entry(next, struct event_stat_id, rb_node); + perf_session__collapse_resort(&stats->hists); + perf_session__output_resort(&stats->hists, stats->stats.total); + if (rb_first(&session->stats_by_id) == + rb_last(&session->stats_by_id)) + fprintf(stdout, "# Samples: %Ld\n#\n", + stats->stats.total); + else + fprintf(stdout, "# Samples: %Ld %s\n#\n", + stats->stats.total, + __event_name(stats->type, stats->config)); + + perf_session__fprintf_hists(&stats->hists, NULL, false, stdout, + stats->stats.total); + fprintf(stdout, "\n\n"); + next = rb_next(&stats->rb_node); + } + if (sort_order == default_sort_order && parent_pattern == default_parent_pattern) fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); -- cgit v0.10.2 From 65f2ed2b2fa6034ef9890b60c8fd39fbe76b9d37 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 9 Mar 2010 15:58:17 -0300 Subject: perf report: Print the map table just after samples for which no map was found MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If -vv is used just the map table will be printed, -vvv will print the symbol table too, with it we can see that we have a bug where some samples are not being resolved to a map when we get them in the perf.data stream, but after we have it all processed, we can find the right 
map, some reordering probably is happening. Upcoming patches will provide ways to ask for most PERF_SAMPLE_ conditional samples to be taken for !PERF_RECORD_SAMPLE events too, then we'll be able to ask for PERF_SAMPLE_TIME and PERF_SAMPLE_CPU to help diagnose this. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1268161097-17761-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 73ebb6f..bdcfd61 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -655,6 +655,11 @@ print_entries: } ret += hist_entry__fprintf(h, pair, show_displacement, displacement, fp, session_total); + if (h->map == NULL && verbose > 1) { + __map_groups__fprintf_maps(&h->thread->mg, + MAP__FUNCTION, fp); + fprintf(fp, "%.10s end\n", graph_dotted_line); + } } free(rem_sq_bracket); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 9024fa1..fa96831 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -79,8 +79,8 @@ int thread__comm_len(struct thread *self) return self->comm_len; } -static size_t __map_groups__fprintf_maps(struct map_groups *self, - enum map_type type, FILE *fp) +size_t __map_groups__fprintf_maps(struct map_groups *self, + enum map_type type, FILE *fp) { size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); struct rb_node *nd; @@ -89,7 +89,7 @@ static size_t __map_groups__fprintf_maps(struct map_groups *self, struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); - if (verbose > 1) { + if (verbose > 2) { printed += dso__fprintf(pos->dso, type, fp); printed += fprintf(fp, "--\n"); } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 0a28f39..dcf7030 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -10,6 +10,9 @@ struct map_groups { struct list_head removed_maps[MAP__NR_TYPES]; }; +size_t __map_groups__fprintf_maps(struct map_groups *self, + enum map_type type, FILE *fp); + struct thread { struct rb_node rb_node; struct map_groups mg; -- cgit v0.10.2 From db2c4c7791cd04512093d05afc693c3511a65fd7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 2 Feb 2010 23:34:40 +0100 Subject: lockdep: Move lock events under lockdep recursion protection There are rcu locked read side areas in the path where we submit a trace event. And these rcu_read_(un)lock() trigger lock events, which create recursive events. One pair in do_perf_sw_event: __lock_acquire | |--96.11%-- lock_acquire | | | |--27.21%-- do_perf_sw_event | | perf_tp_event | | | | | |--49.62%-- ftrace_profile_lock_release | | | lock_release | | | | | | | |--33.85%-- _raw_spin_unlock Another pair in perf_output_begin/end: __lock_acquire |--23.40%-- perf_output_begin | | __perf_event_overflow | | perf_swevent_overflow | | perf_swevent_add | | perf_swevent_ctx_event | | do_perf_sw_event | | perf_tp_event | | | | | |--55.37%-- ftrace_profile_lock_acquire | | | lock_acquire | | | | | | | |--37.31%-- _raw_spin_lock The problem is not that much the trace recursion itself, as we have a recursion protection already (though it's always wasteful to recurse). But the trace events are outside the lockdep recursion protection, then each lockdep event triggers a lock trace, which will trigger two other lockdep events. 
Here the recursive lock trace event won't be taken because of the trace recursion, so the recursion stops there but lockdep will still analyse these new events: To sum up, for each lockdep events we have: lock_*() | trace lock_acquire | ----- rcu_read_lock() | | | lock_acquire() | | | trace_lock_acquire() (stopped) | | | lockdep analyze | ----- rcu_read_unlock() | lock_release | trace_lock_release() (stopped) | lockdep analyze And you can repeat the above two times as we have two rcu read side sections when we submit an event. This is fixed in this patch by moving the lock trace event under the lockdep recursion protection. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Hitoshi Mitake Cc: Li Zefan Cc: Lai Jiangshan Cc: Masami Hiramatsu Cc: Jens Axboe diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0c30d04..65b5f5b 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, { unsigned long flags; - trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); - if (unlikely(current->lockdep_recursion)) return; @@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, check_flags(flags); current->lockdep_recursion = 1; + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), nest_lock, ip, 0); current->lockdep_recursion = 0; @@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested, { unsigned long flags; - trace_lock_release(lock, nested, ip); - if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; + trace_lock_release(lock, nested, ip); __lock_release(lock, nested, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); @@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; - trace_lock_contended(lock, ip); - if (unlikely(!lock_stat)) return; @@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; + trace_lock_contended(lock, ip); __lock_contended(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); -- cgit v0.10.2 From 61e67fb9d3ed13e6a7f58652ae4979b9c872fa57 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 07:38:37 +0100 Subject: perf/x86-64: Use frame pointer to walk on irq and process stacks We were using the frame pointer based stack walker on every contexts in x86-32, but not in x86-64 where we only use the seven-league boots on the exception stacks. Use it also on irq and process stacks. This utterly accelerate the captures. 
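For context on the dumpstack_64.c hunks below: dump_trace() used to hard-code print_context_stack() for the irq and process stacks, so only the exception stacks got the frame-pointer walker; routing those calls through ops->walk_stack lets whichever walker the caller selected run everywhere. The stand-alone user-space sketch below illustrates what frame-pointer walking amounts to, using a frame layout modelled on the struct stack_frame that appears in a later hunk of this series; the emit_addr() helper and the addresses are invented for the example, not kernel code.

#include <stdio.h>

/* Frame layout modelled on struct stack_frame from the x86 dumpstack header. */
struct frame {
	struct frame *next_frame;
	unsigned long return_address;
};

/*
 * Follow the frame-pointer chain only -- the walk this patch also applies
 * to the irq and process stacks instead of scanning every word on them.
 */
static void walk_frame_pointers(const struct frame *bp,
				void (*emit)(unsigned long addr))
{
	while (bp) {
		emit(bp->return_address);
		bp = bp->next_frame;
	}
}

static void emit_addr(unsigned long addr)
{
	printf("  [<%08lx>]\n", addr);
}

int main(void)
{
	/* A fake three-frame call chain, innermost frame first. */
	struct frame f3 = { NULL, 0xc0100300UL };
	struct frame f2 = { &f3,  0xc0100200UL };
	struct frame f1 = { &f2,  0xc0100100UL };

	walk_frame_pointers(&f1, emit_addr);
	return 0;
}

Scanning, by contrast, has to treat every stack word that could be a text address as a potential return address, which is slower and noisier; that difference is what the changelog's speed claim refers to.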
Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index d5e2a2e..272c9f1 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -208,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, if (in_irq_stack(stack, irq_stack, irq_stack_end)) { if (ops->stack(data, "IRQ") < 0) break; - bp = print_context_stack(tinfo, stack, bp, + bp = ops->walk_stack(tinfo, stack, bp, ops, data, irq_stack_end, &graph); /* * We link to the next stack (which would be @@ -229,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, /* * This handles the process stack: */ - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph); + bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); put_cpu(); } EXPORT_SYMBOL(dump_trace); -- cgit v0.10.2 From 5331d7b84613b8325362dde53dc2bff2fb87d351 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Mar 2010 21:15:56 +0100 Subject: perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot Events that trigger overflows by interrupting a context can use get_irq_regs() or task_pt_regs() to retrieve the state when the event triggered. But this is not the case for some other class of events like trace events as tracepoints are executed in the same context than the code that triggered the event. It means we need a different api to capture the regs there, namely we need a hot snapshot to get the most important informations for perf: the instruction pointer to get the event origin, the frame pointer for the callchain, the code segment for user_mode() tests (we always use __KERNEL_CS as trace events always occur from the kernel) and the eflags for further purposes. v2: rename perf_save_regs to perf_fetch_caller_regs as per Masami's suggestion. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1d665a0b..c6bde7d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1707,3 +1707,15 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } + +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ + regs->ip = ip; + /* + * perf_arch_fetch_caller_regs adds another call, we need to increment + * the skip level + */ + regs->bp = rewind_frame_pointer(skip + 1); + regs->cs = __KERNEL_CS; + local_save_flags(regs->flags); +} diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index 4fd1420..29e5f7c 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h @@ -29,4 +29,19 @@ struct stack_frame { struct stack_frame *next_frame; unsigned long return_address; }; + +static inline unsigned long rewind_frame_pointer(int n) +{ + struct stack_frame *frame; + + get_bp(frame); + +#ifdef CONFIG_FRAME_POINTER + while (n--) + frame = frame->next_frame; #endif + + return (unsigned long)frame; +} + +#endif /* DUMPSTACK_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 80acbf3..70cffd0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -452,6 +452,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 @@ -847,6 +848,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) __perf_sw_event(event_id, nr, nmi, regs, addr); } +extern void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); + +/* + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. 
We only need a few of the regs: + * - ip for PERF_SAMPLE_IP + * - cs for user_mode() tests + * - bp for callchains + * - eflags, for future purposes, just in case + */ +static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) +{ + unsigned long ip; + + memset(regs, 0, sizeof(*regs)); + + switch (skip) { + case 1 : + ip = CALLER_ADDR0; + break; + case 2 : + ip = CALLER_ADDR1; + break; + case 3 : + ip = CALLER_ADDR2; + break; + case 4: + ip = CALLER_ADDR3; + break; + /* No need to support further for now */ + default: + ip = 0; + } + + return perf_arch_fetch_caller_regs(regs, ip, skip); +} + extern void __perf_event_mmap(struct vm_area_struct *vma); static inline void perf_event_mmap(struct vm_area_struct *vma) @@ -880,7 +919,8 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); +extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, + int entry_size, struct pt_regs *regs); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 52c69a3..359d7f6 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,6 +2786,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +__weak +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ +} + /* * Output */ -- cgit v0.10.2 From c530665c31c0140b74ca7689e7f836177796e5bd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 07:16:16 +0100 Subject: perf: Take a hot regs snapshot for trace events We are taking a wrong regs snapshot when a trace event triggers. Either we use get_irq_regs(), which gives us the interrupted registers if we are in an interrupt, or we use task_pt_regs() which gives us the state before we entered the kernel, assuming we are lucky enough to be no kernel thread, in which case task_pt_regs() returns the initial set of regs when the kernel thread was started. What we want is different. We need a hot snapshot of the regs, so that we can get the instruction pointer to record in the sample, the frame pointer for the callchain, and some other things. Let's use the new perf_fetch_caller_regs() for that. Comparison with perf record -e lock: -R -a -f -g Before: perf [kernel] [k] __do_softirq | --- __do_softirq | |--55.16%-- __open | --44.84%-- __write_nocancel After: perf [kernel] [k] perf_tp_event | --- perf_tp_event | |--41.07%-- lock_acquire | | | |--39.36%-- _raw_spin_lock | | | | | |--7.81%-- hrtimer_interrupt | | | smp_apic_timer_interrupt | | | apic_timer_interrupt The old case was producing unreliable callchains. Now having right frame and instruction pointers, we have the trace we want. Also syscalls and kprobe events already have the right regs, let's use them instead of wasting a retrieval. v2: Follow the rename perf_save_regs() -> perf_fetch_caller_regs() Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 6b7c444..ac424f1 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -187,6 +187,9 @@ do { \ #ifdef CONFIG_PERF_EVENTS struct perf_event; + +DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); + extern int ftrace_profile_enable(int event_id); extern void ftrace_profile_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, @@ -198,11 +201,11 @@ ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, static inline void ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags) + u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; - perf_tp_event(entry->type, addr, count, raw_data, size); + perf_tp_event(entry->type, addr, count, raw_data, size, regs); perf_swevent_put_recursion_context(rctx); local_irq_restore(irq_flags); } diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 0804cd5..f31bb8b 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -764,6 +764,7 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ + struct pt_regs *__regs; \ int __entry_size; \ int __data_size; \ int rctx; \ @@ -784,8 +785,11 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ \ { assign; } \ \ + __regs = &__get_cpu_var(perf_trace_regs); \ + perf_fetch_caller_regs(__regs, 2); \ + \ ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags); \ + __count, irq_flags, __regs); \ } #undef DEFINE_EVENT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 359d7f6..45b4b6e 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4318,9 +4318,8 @@ static const struct pmu perf_ops_task_clock = { #ifdef CONFIG_EVENT_TRACING void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size) + int entry_size, struct pt_regs *regs) { - struct pt_regs *regs = get_irq_regs(); struct perf_sample_data data; struct perf_raw_record raw = { .size = entry_size, @@ -4330,12 +4329,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, perf_sample_data_init(&data, addr); data.raw = &raw; - if (!regs) - regs = task_pt_regs(current); - /* Trace events already protected against recursion */ do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, - &data, regs); + &data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index f0d6930..e66d21e 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -2,13 +2,14 @@ * trace event based perf counter profiling * * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * + * Copyright (C) 2009-2010 Frederic Weisbecker */ #include #include #include "trace.h" +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); static char *perf_trace_buf; static char *perf_trace_buf_nmi; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 505c922..f7a20a8 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1240,7 +1240,7 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = 
call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ @@ -1271,7 +1271,8 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + irq_flags, regs); } static int probe_profile_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index cba47d7..7e6e84f 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -467,7 +467,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysenter_enable(struct ftrace_event_call *call) @@ -542,7 +542,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysexit_enable(struct ftrace_event_call *call) -- cgit v0.10.2 From 97d5a22005f38057b4bc0d95f81cd26510268794 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 5 Mar 2010 05:35:37 +0100 Subject: perf: Drop the obsolete profile naming for trace events Drop the obsolete "profile" naming used by perf for trace events. Perf can now do more than simple events counting, so generalize the API naming. 
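As a reading aid for the many hunks below, here is a minimal sketch of the renamed submission path for a single event, following the pattern of the kprobe and syscall handlers in this patch; these calls were previously spelled ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() with the FTRACE_MAX_PROFILE_SIZE limit. my_event_record, my_event_id and my_event_perf_handler are placeholders invented for the sketch, not real kernel symbols.

#include <linux/kernel.h>
#include <linux/ftrace_event.h>

/* Placeholder record layout; a real event embeds struct trace_entry first. */
struct my_event_record {
	struct trace_entry	ent;
	unsigned long		value;
};

static int my_event_id;	/* would be the ftrace_event_call id */

static void my_event_perf_handler(unsigned long value, struct pt_regs *regs)
{
	struct my_event_record *rec;
	unsigned long irq_flags;
	int rctx;
	int size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	/* Fills the common trace_entry header and hands back the per-cpu buffer. */
	rec = perf_trace_buf_prepare(size, my_event_id, &rctx, &irq_flags);
	if (!rec)
		return;

	rec->value = value;
	perf_trace_buf_submit(rec, size, rctx, 0, 1, irq_flags, regs);
}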
Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Jason Baron diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index ac424f1..c0f4b36 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -131,12 +131,12 @@ struct ftrace_event_call { void *mod; void *data; - int profile_count; - int (*profile_enable)(struct ftrace_event_call *); - void (*profile_disable)(struct ftrace_event_call *); + int perf_refcount; + int (*perf_event_enable)(struct ftrace_event_call *); + void (*perf_event_disable)(struct ftrace_event_call *); }; -#define FTRACE_MAX_PROFILE_SIZE 2048 +#define PERF_MAX_TRACE_SIZE 2048 #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ @@ -190,17 +190,17 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int ftrace_profile_enable(int event_id); -extern void ftrace_profile_disable(int event_id); +extern int perf_trace_enable(int event_id); +extern void perf_trace_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); extern void * -ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, +perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, unsigned long *irq_flags); static inline void -ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8126f23..51435bc 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -101,18 +101,18 @@ struct perf_event_attr; #ifdef CONFIG_PERF_EVENTS -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ - .profile_enable = prof_sysenter_enable, \ - .profile_disable = prof_sysenter_disable, +#define TRACE_SYS_ENTER_PERF_INIT(sname) \ + .perf_event_enable = perf_sysenter_enable, \ + .perf_event_disable = perf_sysenter_disable, -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ - .profile_enable = prof_sysexit_enable, \ - .profile_disable = prof_sysexit_disable, +#define TRACE_SYS_EXIT_PERF_INIT(sname) \ + .perf_event_enable = perf_sysexit_enable, \ + .perf_event_disable = perf_sysexit_disable, #else -#define TRACE_SYS_ENTER_PROFILE(sname) -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) -#define TRACE_SYS_EXIT_PROFILE(sname) -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) +#define TRACE_SYS_ENTER_PERF(sname) +#define TRACE_SYS_ENTER_PERF_INIT(sname) +#define TRACE_SYS_EXIT_PERF(sname) +#define TRACE_SYS_EXIT_PERF_INIT(sname) #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_FTRACE_SYSCALLS @@ -149,7 +149,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_enter, \ .unregfunc = unreg_event_syscall_enter, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PROFILE_INIT(sname) \ + TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ @@ -171,7 +171,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_exit, \ .unregfunc = unreg_event_syscall_exit, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PROFILE_INIT(sname) \ + TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index f31bb8b..25ab56f 100644 --- a/include/trace/ftrace.h +++ 
b/include/trace/ftrace.h @@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ \ -static void ftrace_profile_##name(proto); \ +static void perf_trace_##name(proto); \ \ static notrace int \ -ftrace_profile_enable_##name(struct ftrace_event_call *unused) \ +perf_trace_enable_##name(struct ftrace_event_call *unused) \ { \ - return register_trace_##name(ftrace_profile_##name); \ + return register_trace_##name(perf_trace_##name); \ } \ \ static notrace void \ -ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ +perf_trace_disable_##name(struct ftrace_event_call *unused) \ { \ - unregister_trace_##name(ftrace_profile_##name); \ + unregister_trace_##name(perf_trace_##name); \ } #undef DEFINE_EVENT_PRINT @@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ #ifdef CONFIG_PERF_EVENTS -#define _TRACE_PROFILE_INIT(call) \ - .profile_enable = ftrace_profile_enable_##call, \ - .profile_disable = ftrace_profile_disable_##call, +#define _TRACE_PERF_INIT(call) \ + .perf_event_enable = perf_trace_enable_##call, \ + .perf_event_disable = perf_trace_disable_##call, #else -#define _TRACE_PROFILE_INIT(call) +#define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ #undef __entry @@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##template, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #undef DEFINE_EVENT_PRINT @@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##call, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Define the insertion callback to profile events + * Define the insertion callback to perf events * * The job is very similar to ftrace_raw_event_ except that we don't * insert in the ring buffer but in a perf counter. 
* - * static void ftrace_profile_(proto) + * static void ftrace_perf_(proto) * { * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ftrace_event_call *event_call = &event_; @@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ +perf_trace_templ_##call(struct ftrace_event_call *event_call, \ proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ @@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ + if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ "profile buffer not large enough")) \ return; \ - entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \ + entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ __entry_size, event_call->id, &rctx, &irq_flags); \ if (!entry) \ return; \ @@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ __regs = &__get_cpu_var(perf_trace_regs); \ perf_fetch_caller_regs(__regs, 2); \ \ - ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ __count, irq_flags, __regs); \ } #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ -static notrace void ftrace_profile_##call(proto) \ +static notrace void perf_trace_##call(proto) \ { \ struct ftrace_event_call *event_call = &event_##call; \ \ - ftrace_profile_templ_##template(event_call, args); \ + perf_trace_templ_##template(event_call, args); \ } #undef DEFINE_EVENT_PRINT diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 0387100..e5e5f48 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif #ifdef CONFIG_PERF_EVENTS -int prof_sysenter_enable(struct ftrace_event_call *call); -void prof_sysenter_disable(struct ftrace_event_call *call); -int prof_sysexit_enable(struct ftrace_event_call *call); -void prof_sysexit_disable(struct ftrace_event_call *call); +int perf_sysenter_enable(struct ftrace_event_call *call); +void perf_sysenter_disable(struct ftrace_event_call *call); +int perf_sysexit_enable(struct ftrace_event_call *call); +void perf_sysexit_disable(struct ftrace_event_call *call); #endif #endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 45b4b6e..c502b18 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event, static void tp_perf_event_destroy(struct perf_event *event) { - ftrace_profile_disable(event->attr.config); + perf_trace_disable(event->attr.config); } static const struct pmu *tp_perf_event_init(struct perf_event *event) @@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (ftrace_profile_enable(event->attr.config)) + if (perf_trace_enable(event->attr.config)) return NULL; event->destroy = tp_perf_event_destroy; diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index d00c6fe..78edc64 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o 
obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o ifeq ($(CONFIG_PERF_EVENTS),y) -obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o +obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c new file mode 100644 index 0000000..f315b12 --- /dev/null +++ b/kernel/trace/trace_event_perf.c @@ -0,0 +1,165 @@ +/* + * trace event based perf event profiling/tracing + * + * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra + * Copyright (C) 2009-2010 Frederic Weisbecker + */ + +#include +#include +#include "trace.h" + +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); + +static char *perf_trace_buf; +static char *perf_trace_buf_nmi; + +typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; + +/* Count the events in use (per event id, not per instance) */ +static int total_ref_count; + +static int perf_trace_event_enable(struct ftrace_event_call *event) +{ + char *buf; + int ret = -ENOMEM; + + if (event->perf_refcount++ > 0) + return 0; + + if (!total_ref_count) { + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf; + + rcu_assign_pointer(perf_trace_buf, buf); + + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf_nmi; + + rcu_assign_pointer(perf_trace_buf_nmi, buf); + } + + ret = event->perf_event_enable(event); + if (!ret) { + total_ref_count++; + return 0; + } + +fail_buf_nmi: + if (!total_ref_count) { + free_percpu(perf_trace_buf_nmi); + free_percpu(perf_trace_buf); + perf_trace_buf_nmi = NULL; + perf_trace_buf = NULL; + } +fail_buf: + event->perf_refcount--; + + return ret; +} + +int perf_trace_enable(int event_id) +{ + struct ftrace_event_call *event; + int ret = -EINVAL; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id && event->perf_event_enable && + try_module_get(event->mod)) { + ret = perf_trace_event_enable(event); + break; + } + } + mutex_unlock(&event_mutex); + + return ret; +} + +static void perf_trace_event_disable(struct ftrace_event_call *event) +{ + char *buf, *nmi_buf; + + if (--event->perf_refcount > 0) + return; + + event->perf_event_disable(event); + + if (!--total_ref_count) { + buf = perf_trace_buf; + rcu_assign_pointer(perf_trace_buf, NULL); + + nmi_buf = perf_trace_buf_nmi; + rcu_assign_pointer(perf_trace_buf_nmi, NULL); + + /* + * Ensure every events in profiling have finished before + * releasing the buffers + */ + synchronize_sched(); + + free_percpu(buf); + free_percpu(nmi_buf); + } +} + +void perf_trace_disable(int event_id) +{ + struct ftrace_event_call *event; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id) { + perf_trace_event_disable(event); + module_put(event->mod); + break; + } + } + mutex_unlock(&event_mutex); +} + +__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, + int *rctxp, unsigned long *irq_flags) +{ + struct trace_entry *entry; + char *trace_buf, *raw_data; + int pc, cpu; + + pc = preempt_count(); + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(*irq_flags); + + *rctxp = perf_swevent_get_recursion_context(); + if (*rctxp < 0) + goto err_recursion; + + cpu = smp_processor_id(); + + if (in_nmi()) + trace_buf = rcu_dereference(perf_trace_buf_nmi); + else + trace_buf = rcu_dereference(perf_trace_buf); + + if (!trace_buf) + goto 
err; + + raw_data = per_cpu_ptr(trace_buf, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + entry = (struct trace_entry *)raw_data; + tracing_generic_entry_update(entry, *irq_flags, pc); + entry->type = type; + + return raw_data; +err: + perf_swevent_put_recursion_context(*rctxp); +err_recursion: + local_irq_restore(*irq_flags); + return NULL; +} +EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c deleted file mode 100644 index e66d21e..0000000 --- a/kernel/trace/trace_event_profile.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * trace event based perf counter profiling - * - * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * Copyright (C) 2009-2010 Frederic Weisbecker - */ - -#include -#include -#include "trace.h" - -DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); - -static char *perf_trace_buf; -static char *perf_trace_buf_nmi; - -typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; - -/* Count the events in use (per event id, not per instance) */ -static int total_profile_count; - -static int ftrace_profile_enable_event(struct ftrace_event_call *event) -{ - char *buf; - int ret = -ENOMEM; - - if (event->profile_count++ > 0) - return 0; - - if (!total_profile_count) { - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf; - - rcu_assign_pointer(perf_trace_buf, buf); - - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf_nmi; - - rcu_assign_pointer(perf_trace_buf_nmi, buf); - } - - ret = event->profile_enable(event); - if (!ret) { - total_profile_count++; - return 0; - } - -fail_buf_nmi: - if (!total_profile_count) { - free_percpu(perf_trace_buf_nmi); - free_percpu(perf_trace_buf); - perf_trace_buf_nmi = NULL; - perf_trace_buf = NULL; - } -fail_buf: - event->profile_count--; - - return ret; -} - -int ftrace_profile_enable(int event_id) -{ - struct ftrace_event_call *event; - int ret = -EINVAL; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id && event->profile_enable && - try_module_get(event->mod)) { - ret = ftrace_profile_enable_event(event); - break; - } - } - mutex_unlock(&event_mutex); - - return ret; -} - -static void ftrace_profile_disable_event(struct ftrace_event_call *event) -{ - char *buf, *nmi_buf; - - if (--event->profile_count > 0) - return; - - event->profile_disable(event); - - if (!--total_profile_count) { - buf = perf_trace_buf; - rcu_assign_pointer(perf_trace_buf, NULL); - - nmi_buf = perf_trace_buf_nmi; - rcu_assign_pointer(perf_trace_buf_nmi, NULL); - - /* - * Ensure every events in profiling have finished before - * releasing the buffers - */ - synchronize_sched(); - - free_percpu(buf); - free_percpu(nmi_buf); - } -} - -void ftrace_profile_disable(int event_id) -{ - struct ftrace_event_call *event; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id) { - ftrace_profile_disable_event(event); - module_put(event->mod); - break; - } - } - mutex_unlock(&event_mutex); -} - -__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, - int *rctxp, unsigned long *irq_flags) -{ - struct trace_entry *entry; - char *trace_buf, *raw_data; - int pc, cpu; - - pc = preempt_count(); - - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(*irq_flags); - - *rctxp = perf_swevent_get_recursion_context(); - if (*rctxp < 0) - goto 
err_recursion; - - cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto err; - - raw_data = per_cpu_ptr(trace_buf, cpu); - - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - - entry = (struct trace_entry *)raw_data; - tracing_generic_entry_update(entry, *irq_flags, pc); - entry->type = type; - - return raw_data; -err: - perf_swevent_put_recursion_context(*rctxp); -err_recursion: - local_irq_restore(*irq_flags); - return NULL; -} -EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3f972ad9..beab8bf 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, trace_create_file("enable", 0644, call->dir, call, enable); - if (call->id && call->profile_enable) + if (call->id && call->perf_event_enable) trace_create_file("id", 0444, call->dir, call, id); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f7a20a8..1251e36 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp) #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes void kprobe_profile_func(struct kprobe *kp, +static __kprobes void kprobe_perf_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); @@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ -static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, +static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); @@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + perf_trace_buf_submit(entry, size, rctx, 
entry->ret_ip, 1, irq_flags, regs); } -static int probe_profile_enable(struct ftrace_event_call *call) +static int probe_perf_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call) return enable_kprobe(&tp->rp.kp); } -static void probe_profile_disable(struct ftrace_event_call *call) +static void probe_perf_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) kprobe_trace_func(kp, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kprobe_profile_func(kp, regs); + kprobe_perf_func(kp, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) kretprobe_trace_func(ri, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kretprobe_profile_func(ri, regs); + kretprobe_perf_func(ri, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp) call->unregfunc = probe_event_disable; #ifdef CONFIG_PERF_EVENTS - call->profile_enable = probe_profile_enable; - call->profile_disable = probe_profile_disable; + call->perf_event_enable = probe_perf_enable; + call->perf_event_disable = probe_perf_disable; #endif call->data = tp; ret = trace_add_event_call(call); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 7e6e84f..33c2a5b 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls); #ifdef CONFIG_PERF_EVENTS -static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); -static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); -static int sys_prof_refcount_enter; -static int sys_prof_refcount_exit; +static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); +static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); +static int sys_perf_refcount_enter; +static int sys_perf_refcount_exit; -static void prof_syscall_enter(struct pt_regs *regs, long id) +static void perf_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; @@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "profile buffer not large enough")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "perf buffer not large enough")) return; - rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, sys_data->enter_event->id, &rctx, &flags); if (!rec) return; @@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, 
flags, regs); } -int prof_sysenter_enable(struct ftrace_event_call *call) +int perf_sysenter_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_enter) - ret = register_trace_sys_enter(prof_syscall_enter); + if (!sys_perf_refcount_enter) + ret = register_trace_sys_enter(perf_syscall_enter); if (ret) { pr_info("event trace: Could not activate" "syscall entry trace point"); } else { - set_bit(num, enabled_prof_enter_syscalls); - sys_prof_refcount_enter++; + set_bit(num, enabled_perf_enter_syscalls); + sys_perf_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysenter_disable(struct ftrace_event_call *call) +void perf_sysenter_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_enter--; - clear_bit(num, enabled_prof_enter_syscalls); - if (!sys_prof_refcount_enter) - unregister_trace_sys_enter(prof_syscall_enter); + sys_perf_refcount_enter--; + clear_bit(num, enabled_perf_enter_syscalls); + if (!sys_perf_refcount_enter) + unregister_trace_sys_enter(perf_syscall_enter); mutex_unlock(&syscall_trace_lock); } -static void prof_syscall_exit(struct pt_regs *regs, long ret) +static void perf_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; @@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) * Impossible, but be paranoid with the future * How to put this check outside runtime? 
*/ - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "exit event has grown above profile buffer size")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "exit event has grown above perf buffer size")) return; - rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, sys_data->exit_event->id, &rctx, &flags); if (!rec) return; @@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysexit_enable(struct ftrace_event_call *call) +int perf_sysexit_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_exit) - ret = register_trace_sys_exit(prof_syscall_exit); + if (!sys_perf_refcount_exit) + ret = register_trace_sys_exit(perf_syscall_exit); if (ret) { pr_info("event trace: Could not activate" "syscall exit trace point"); } else { - set_bit(num, enabled_prof_exit_syscalls); - sys_prof_refcount_exit++; + set_bit(num, enabled_perf_exit_syscalls); + sys_perf_refcount_exit++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysexit_disable(struct ftrace_event_call *call) +void perf_sysexit_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_exit--; - clear_bit(num, enabled_prof_exit_syscalls); - if (!sys_prof_refcount_exit) - unregister_trace_sys_exit(prof_syscall_exit); + sys_perf_refcount_exit--; + clear_bit(num, enabled_perf_exit_syscalls); + if (!sys_perf_refcount_exit) + unregister_trace_sys_exit(perf_syscall_exit); mutex_unlock(&syscall_trace_lock); } -- cgit v0.10.2 From 220b140b52ab6cc133f674a7ffec8fa792054f25 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 10 Mar 2010 20:45:52 +1100 Subject: perf_event: Fix oops triggered by cpu offline/online Anton Blanchard found that he could reliably make the kernel hit a BUG_ON in the slab allocator by taking a cpu offline and then online while a system-wide perf record session was running. The reason is that when the cpu comes up, we completely reinitialize the ctx field of the struct perf_cpu_context for the cpu. If there is a system-wide perf record session running, then there will be a struct perf_event that has a reference to the context, so its refcount will be 2. (The perf_event has been removed from the context's group_entry and event_entry lists by perf_event_exit_cpu(), but that doesn't remove the perf_event's reference to the context and doesn't decrement the context's refcount.) When the cpu comes up, perf_event_init_cpu() gets called, and it calls __perf_event_init_context() on the cpu's context. That resets the refcount to 1. Then when the perf record session finishes and the perf_event is closed, the refcount gets decremented to 0 and the context gets kfreed after an RCU grace period. Since the context wasn't kmalloced -- it's part of a per-cpu variable -- bad things happen. In fact we don't need to completely reinitialize the context when the cpu comes up. It's sufficient to initialize the context once at boot, but we need to do it for all possible cpus. 
This moves the context initialization to happen at boot time. With this, we don't trash the refcount and the context never gets kfreed, and we don't hit the BUG_ON. Reported-by: Anton Blanchard Signed-off-by: Paul Mackerras Tested-by: Anton Blanchard Acked-by: Peter Zijlstra Cc: Signed-off-by: Ingo Molnar diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c502b18..fb3031c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5368,12 +5368,22 @@ int perf_event_init_task(struct task_struct *child) return ret; } +static void __init perf_event_init_all_cpus(void) +{ + int cpu; + struct perf_cpu_context *cpuctx; + + for_each_possible_cpu(cpu) { + cpuctx = &per_cpu(perf_cpu_context, cpu); + __perf_event_init_context(&cpuctx->ctx, NULL); + } +} + static void __cpuinit perf_event_init_cpu(int cpu) { struct perf_cpu_context *cpuctx; cpuctx = &per_cpu(perf_cpu_context, cpu); - __perf_event_init_context(&cpuctx->ctx, NULL); spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; @@ -5439,6 +5449,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = { void __init perf_event_init(void) { + perf_event_init_all_cpus(); perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, -- cgit v0.10.2 From a12b51c478899fe0b7e874a559b05ba35f1128ee Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 10 Mar 2010 20:36:09 +1100 Subject: perf tools: Fix sparse CPU numbering related bugs At present, the perf subcommands that do system-wide monitoring (perf stat, perf record and perf top) don't work properly unless the online cpus are numbered 0, 1, ..., N-1. These tools ask for the number of online cpus with sysconf(_SC_NPROCESSORS_ONLN) and then try to create events for cpus 0, 1, ..., N-1. This creates problems for systems where the online cpus are numbered sparsely. For example, a POWER6 system in single-threaded mode (i.e. only running 1 hardware thread per core) will have only even-numbered cpus online. This fixes the problem by reading the /sys/devices/system/cpu/online file to find out which cpus are online. The code that does that is in tools/perf/util/cpumap.[ch], and consists of a read_cpu_map() function that sets up a cpumap[] array and returns the number of online cpus. If /sys/devices/system/cpu/online can't be read or can't be parsed successfully, it falls back to using sysconf to ask how many cpus are online and sets up an identity map in cpumap[]. The perf record, perf stat and perf top code then calls read_cpu_map() in the system-wide monitoring case (instead of sysconf) and uses cpumap[] to get the cpu numbers to pass to perf_event_open. 
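To make the new helper's contract concrete: read_cpu_map() fills cpumap[] with the online cpu numbers (possibly sparse) and returns how many there are, falling back to an identity map when the sysfs file cannot be parsed. Below is a minimal caller sketch in the spirit of the builtin-record change that follows; it only builds inside tools/perf where util/cpumap.h lives, and open_one_counter() is a made-up stand-in for the real sys_perf_event_open() setup.

#include <stdio.h>
#include "util/cpumap.h"	/* read_cpu_map() and cpumap[] added by this patch */

/* Made-up stand-in for the real per-cpu counter setup. */
static void open_one_counter(int cpu)
{
	printf("opening system-wide counter on cpu %d\n", cpu);
}

static void open_counters_on_online_cpus(void)
{
	int nr_cpus = read_cpu_map();
	int i;

	/*
	 * cpumap[] may be sparse -- e.g. only even cpu numbers on a POWER6
	 * in single-threaded mode -- so iterate the map rather than 0..N-1.
	 */
	for (i = 0; i < nr_cpus; i++)
		open_one_counter(cpumap[i]);
}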
Signed-off-by: Paul Mackerras Cc: Anton Blanchard Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100310093609.GA3959@brick.ozlabs.ibm.com> Signed-off-by: Ingo Molnar diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2d53738..5840499 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -387,6 +387,7 @@ LIB_H += util/thread.h LIB_H += util/trace-event.h LIB_H += util/probe-finder.h LIB_H += util/probe-event.h +LIB_H += util/cpumap.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -433,6 +434,7 @@ LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o LIB_OBJS += util/probe-event.o LIB_OBJS += util/util.o +LIB_OBJS += util/cpumap.o BUILTIN_OBJS += builtin-annotate.o diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f573bbb..b09d3b2 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -22,6 +22,7 @@ #include "util/debug.h" #include "util/session.h" #include "util/symbol.h" +#include "util/cpumap.h" #include #include @@ -421,9 +422,6 @@ static int __cmd_record(int argc, const char **argv) char buf; page_size = sysconf(_SC_PAGE_SIZE); - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); atexit(sig_atexit); signal(SIGCHLD, sig_handler); @@ -547,8 +545,9 @@ static int __cmd_record(int argc, const char **argv) if ((!system_wide && !inherit) || profile_cpu != -1) { open_counters(profile_cpu, target_pid); } else { + nr_cpus = read_cpu_map(); for (i = 0; i < nr_cpus; i++) - open_counters(i, target_pid); + open_counters(cpumap[i], target_pid); } if (file_new) { diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e8c85d5..95db31c 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -45,6 +45,7 @@ #include "util/event.h" #include "util/debug.h" #include "util/header.h" +#include "util/cpumap.h" #include #include @@ -151,7 +152,7 @@ static void create_perf_stat_counter(int counter, int pid) unsigned int cpu; for (cpu = 0; cpu < nr_cpus; cpu++) { - fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0); + fd[cpu][counter] = sys_perf_event_open(attr, -1, cpumap[cpu], -1, 0); if (fd[cpu][counter] < 0 && verbose) fprintf(stderr, ERR_PERF_OPEN, counter, fd[cpu][counter], strerror(errno)); @@ -519,9 +520,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) nr_counters = ARRAY_SIZE(default_attrs); } - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert((int)nr_cpus >= 0); + if (system_wide) + nr_cpus = read_cpu_map(); + else + nr_cpus = 1; /* * We dont want to block the signals - that would cause diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 31f2e59..0b719e3 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -28,6 +28,7 @@ #include #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/cpumap.h" #include "util/debug.h" @@ -1123,7 +1124,7 @@ static void start_counter(int i, int counter) cpu = profile_cpu; if (target_pid == -1 && profile_cpu == -1) - cpu = i; + cpu = cpumap[i]; attr = attrs + counter; @@ -1347,12 +1348,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) attrs[counter].sample_period = default_interval; } - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - if (target_pid != -1 || profile_cpu != -1) nr_cpus = 1; + else + nr_cpus = read_cpu_map(); get_term_dimensions(&winsize); if (print_entries == 0) { diff --git 
a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c new file mode 100644 index 0000000..4e01490 --- /dev/null +++ b/tools/perf/util/cpumap.c @@ -0,0 +1,59 @@ +#include "util.h" +#include "../perf.h" +#include "cpumap.h" +#include +#include + +int cpumap[MAX_NR_CPUS]; + +static int default_cpu_map(void) +{ + int nr_cpus, i; + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert((int)nr_cpus >= 0); + + for (i = 0; i < nr_cpus; ++i) + cpumap[i] = i; + + return nr_cpus; +} + +int read_cpu_map(void) +{ + FILE *onlnf; + int nr_cpus = 0; + int n, cpu, prev; + char sep; + + onlnf = fopen("/sys/devices/system/cpu/online", "r"); + if (!onlnf) + return default_cpu_map(); + + sep = 0; + prev = -1; + for (;;) { + n = fscanf(onlnf, "%u%c", &cpu, &sep); + if (n <= 0) + break; + if (prev >= 0) { + assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS); + while (++prev < cpu) + cpumap[nr_cpus++] = prev; + } + assert (nr_cpus < MAX_NR_CPUS); + cpumap[nr_cpus++] = cpu; + if (n == 2 && sep == '-') + prev = cpu; + else + prev = -1; + if (n == 1 || sep == '\n') + break; + } + fclose(onlnf); + if (nr_cpus > 0) + return nr_cpus; + + return default_cpu_map(); +} diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h new file mode 100644 index 0000000..86c78bb --- /dev/null +++ b/tools/perf/util/cpumap.h @@ -0,0 +1,7 @@ +#ifndef __PERF_CPUMAP_H +#define __PERF_CPUMAP_H + +extern int read_cpu_map(void); +extern int cpumap[]; + +#endif /* __PERF_CPUMAP_H */ -- cgit v0.10.2 From 83ff56f46a8532488ee364bb93a9cb2a59490d33 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 9 Mar 2010 10:22:19 -0500 Subject: kprobes: Calculate the index correctly when freeing the out-of-line execution slot From : Ananth N Mavinakayanahalli When freeing the instruction slot, the arithmetic to calculate the index of the slot in the page needs to account for the total size of the instruction on the various architectures. Calculate the index correctly when freeing the out-of-line execution slot. Reported-by: Sachin Sant Reported-by: Heiko Carstens Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Masami Hiramatsu LKML-Reference: <4B9667AB.9050507@redhat.com> Signed-off-by: Ingo Molnar diff --git a/kernel/kprobes.c b/kernel/kprobes.c index fa034d2..0ed46f3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, struct kprobe_insn_page *kip; list_for_each_entry(kip, &c->pages, list) { - long idx = ((long)slot - (long)kip->insns) / c->insn_size; + long idx = ((long)slot - (long)kip->insns) / + (c->insn_size * sizeof(kprobe_opcode_t)); if (idx >= 0 && idx < slots_per_page(c)) { WARN_ON(kip->slot_used[idx] != SLOT_USED); if (dirty) { -- cgit v0.10.2 From 7ae5f21361fea11f58c398701da635f778635d13 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 11 Mar 2010 13:57:00 +0100 Subject: perf: Make the install relative to DESTDIR if specified Without this change, the install path is relative to prefix/DESTDIR where prefix is automatically set to $HOME. This can produce unexpected results. For example: make -C tools/perf DESTDIR=/home/jkacur/tmp install-man creates the directory: /home/jkacur/home/jkacur/tmp/share/... instead of the expected: /home/jkacur/tmp/share/... 
Signed-off-by: John Kacur Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Tom Zanussi Cc: Kyle McMartin Cc: LKML-Reference: <1268312220-12880-1-git-send-email-jkacur@redhat.com> Signed-off-by: Ingo Molnar diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index bdd3b7e..bd498d4 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) +# Make the path relative to DESTDIR, not prefix +ifndef DESTDIR prefix?=$(HOME) +endif bindir?=$(prefix)/bin htmldir?=$(prefix)/share/doc/perf-doc pdfdir?=$(prefix)/share/doc/perf-doc @@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man man1dir=$(mandir)/man1 man5dir=$(mandir)/man5 man7dir=$(mandir)/man7 -# DESTDIR= ASCIIDOC=asciidoc ASCIIDOC_EXTRA = --unsafe diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 5840499..8a8f52d 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -216,7 +216,10 @@ STRIP ?= strip # runtime figures out where they are based on the path to the executable. # This can help installing the suite in a relocatable way. +# Make the path relative to DESTDIR, not to prefix +ifndef DESTDIR prefix = $(HOME) +endif bindir_relative = bin bindir = $(prefix)/$(bindir_relative) mandir = share/man @@ -233,7 +236,6 @@ sysconfdir = $(prefix)/etc ETC_PERFCONFIG = etc/perfconfig endif lib = lib -# DESTDIR= export prefix bindir sharedir sysconfdir -- cgit v0.10.2 From 85cfabbcd10f8d112feee6e2ec64ee78033b6d3c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Mar 2010 13:06:56 +0100 Subject: perf, ppc: Fix compile error due to new cpu notifiers Fix: arch/powerpc/kernel/perf_event.c:1334: error: 'power_pmu_notifier' undeclared (first use in this function) arch/powerpc/kernel/perf_event.c:1334: error: (Each undeclared identifier is reported only once arch/powerpc/kernel/perf_event.c:1334: error: for each function it appears in.) arch/powerpc/kernel/perf_event.c:1334: error: implicit declaration of function 'power_pmu_notifier' arch/powerpc/kernel/perf_event.c:1334: error: implicit declaration of function 'register_cpu_notifier' Due to commit 3f6da390 (perf: Rework and fix the arch CPU-hotplug hooks). 
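As the quoted errors show, perf_cpu_notifier() refers to its argument by name (it both takes its address and calls it) and its expansion ends in a register_cpu_notifier() call, which is why the callback has to be defined under exactly the identifier passed in, and why perf_event.h now pulls in the cpu notifier header. A contrived userspace sketch of the same failure mode; fake_register() is NOT the real perf_cpu_notifier()/register_cpu_notifier() machinery, it merely shares the property of naming its argument directly:

/* name_mismatch_demo.c: why a definition/registration name mismatch breaks */
#include <stdio.h>

#define NOTIFY_OK 0x0001

/* stand-in macro: takes the callback's address and calls it once */
#define fake_register(fn)                                               \
        do {                                                            \
                int (*hook)(unsigned long, void *) = fn;                \
                printf("registered %s -> %d\n", #fn, hook(0, NULL));    \
        } while (0)

static int power_pmu_notify(unsigned long action, void *hcpu)
{
        (void)action;
        (void)hcpu;
        return NOTIFY_OK;
}

int main(void)
{
        /*
         * fake_register(power_pmu_notifier);
         *      ^ does not compile: 'power_pmu_notifier' undeclared,
         *        the same first error as in the changelog above.
         */
        fake_register(power_pmu_notify);        /* names agree: builds and runs */
        return 0;
}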
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index fbe101d..08460a2 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1298,7 +1298,7 @@ static void power_pmu_setup(int cpu) } static int __cpuinit -power_pmu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 70cffd0..9547703 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -453,6 +453,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 -- cgit v0.10.2 From 45e16a6834b6af098702e5ea6c9a40de42ff77d8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Mar 2010 13:40:30 +0100 Subject: perf, x86: Fix hw_perf_enable() event assignment What happens is that we schedule badly like: <...>-1987 [019] 280.252808: x86_pmu_start: event-46/1300c0: idx: 0 <...>-1987 [019] 280.252811: x86_pmu_start: event-47/1300c0: idx: 1 <...>-1987 [019] 280.252812: x86_pmu_start: event-48/1300c0: idx: 2 <...>-1987 [019] 280.252813: x86_pmu_start: event-49/1300c0: idx: 3 <...>-1987 [019] 280.252814: x86_pmu_start: event-50/1300c0: idx: 32 <...>-1987 [019] 280.252825: x86_pmu_stop: event-46/1300c0: idx: 0 <...>-1987 [019] 280.252826: x86_pmu_stop: event-47/1300c0: idx: 1 <...>-1987 [019] 280.252827: x86_pmu_stop: event-48/1300c0: idx: 2 <...>-1987 [019] 280.252828: x86_pmu_stop: event-49/1300c0: idx: 3 <...>-1987 [019] 280.252829: x86_pmu_stop: event-50/1300c0: idx: 32 <...>-1987 [019] 280.252834: x86_pmu_start: event-47/1300c0: idx: 1 <...>-1987 [019] 280.252834: x86_pmu_start: event-48/1300c0: idx: 2 <...>-1987 [019] 280.252835: x86_pmu_start: event-49/1300c0: idx: 3 <...>-1987 [019] 280.252836: x86_pmu_start: event-50/1300c0: idx: 32 <...>-1987 [019] 280.252837: x86_pmu_start: event-51/1300c0: idx: 32 *FAIL* This happens because we only iterate the n_running events in the first pass, and reset their index to -1 if they don't match to force a re-assignment. Now, in our RR example, n_running == 0 because we fully unscheduled, so event-50 will retain its idx==32, even though in scheduling it will have gotten idx=0, and we don't trigger the re-assign path. The easiest way to fix this is the below patch, which simply validates the full assignment in the second pass. 
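To make that concrete, here is a toy userspace model of what the old and the patched second pass decide when n_running == 0; the index values are made up to mirror the trace above, and none of these arrays are the kernel's structures:

/* reassign_demo.c: stale hwc->idx vs. the freshly computed assignment */
#include <stdio.h>

#define NE 5

int main(void)
{
        const char *name[NE] = { "event-47", "event-48", "event-49",
                                 "event-50", "event-51" };
        int stale[NE]  = { 1, 2, 3, 32, -1 };   /* idx left over (event-51 is new) */
        int assign[NE] = { 1, 2, 3,  0, 32 };   /* result of the new schedule */
        int i;

        for (i = 0; i < NE; i++) {
                /*
                 * old logic: reassign only if idx was reset to -1; with
                 * n_running == 0 the reset loop never ran, so the stale
                 * value survives.
                 */
                int before = (stale[i] == -1) ? assign[i] : stale[i];

                /* patched logic: validate every slot against the assignment */
                int after  = (stale[i] == assign[i]) ? stale[i] : assign[i];

                printf("%s: unpatched -> %2d, patched -> %2d\n",
                       name[i], before, after);
        }
        return 0;
}

With the stale idx trusted, event-50 and event-51 both land on counter 32 (the *FAIL* line in the trace); validating against the fresh assignment moves event-50 back to counter 0.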
Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <1268311069.5037.31.camel@laptop> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index c6bde7d..5fb490c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -811,7 +811,6 @@ void hw_perf_enable(void) * step2: reprogram moved events into new counters */ for (i = 0; i < n_running; i++) { - event = cpuc->event_list[i]; hwc = &event->hw; @@ -826,21 +825,16 @@ void hw_perf_enable(void) continue; x86_pmu_stop(event); - - hwc->idx = -1; } for (i = 0; i < cpuc->n_events; i++) { - event = cpuc->event_list[i]; hwc = &event->hw; - if (i < n_running && - match_prev_assignment(hwc, cpuc, i)) - continue; - - if (hwc->idx == -1) + if (!match_prev_assignment(hwc, cpuc, i)) x86_assign_hw_event(event, cpuc, i); + else if (i < n_running) + continue; x86_pmu_start(event); } -- cgit v0.10.2 From 639fe4b12f92b54c9c3b38c82cdafaa38cfd3e63 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 11 Mar 2010 15:30:35 +0800 Subject: perf: export perf_trace_regs and perf_arch_fetch_caller_regs Export perf_trace_regs and perf_arch_fetch_caller_regs since module will use these. Signed-off-by: Xiao Guangrong [ use EXPORT_PER_CPU_SYMBOL_GPL() ] Signed-off-by: Peter Zijlstra LKML-Reference: <4B989C1B.2090407@cn.fujitsu.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5fb490c..7645fae 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1713,3 +1713,4 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski regs->cs = __KERNEL_CS; local_save_flags(regs->flags); } +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index f315b12..0709e4f 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -10,6 +10,7 @@ #include "trace.h" DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); +EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); static char *perf_trace_buf; static char *perf_trace_buf_nmi; -- cgit v0.10.2 From 9f591fd76afdc0e5192e9ed00a36f8efc0b4dfe6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 11 Mar 2010 15:53:11 -0300 Subject: perf record: Don't try to find buildids in a zero sized file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixing this symptom: [acme@mica linux-2.6-tip]$ perf record -a -f Fatal: Permission error - are you root? Bus error [acme@mica linux-2.6-tip]$ I.e. if for some reason no data is collected, in this case a non root user trying to do systemwide profiling, no data will be collected, and then we end up trying to mmap a zero sized file and access the file header, b00m. 
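The "Bus error" is inherent to what happens next: the zero sized file gets mmap()ed and the first access to the mapping faults with SIGBUS, because no file block backs the page. A minimal reproducer (the temporary file path is an illustrative assumption):

/* sigbus_demo.c: touch a page mapped over an empty file */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        int fd = open("/tmp/empty.data", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0)
                return 1;

        /* file size is 0, but map a full page read-only anyway */
        char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("first byte: %d\n", p[0]);       /* SIGBUS: "Bus error" */
        return 0;
}

Refusing to process a zero sized output file, as the hunk below does, means such a mapping is never created.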
Reported-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: LKML-Reference: <1268333592-30872-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b09d3b2..3b8b638 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -395,6 +395,9 @@ static int process_buildids(void) { u64 size = lseek(output, 0, SEEK_CUR); + if (size == 0) + return 0; + session->fd = output; return __perf_session__process_events(session, post_processing_offset, size - post_processing_offset, -- cgit v0.10.2 From dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 16 Mar 2010 01:05:02 +0100 Subject: perf: Fix unexported generic perf_arch_fetch_caller_regs perf_arch_fetch_caller_regs() is exported for the overriden x86 version, but not for the generic weak version. As a general rule, weak functions should not have their symbol exported in the same file they are defined. So let's export it on trace_event_perf.c as it is used by trace events only. This fixes: ERROR: ".perf_arch_fetch_caller_regs" [fs/xfs/xfs.ko] undefined! ERROR: ".perf_arch_fetch_caller_regs" [arch/powerpc/platforms/cell/spufs/spufs.ko] undefined! -v2: And also only build it if trace events are enabled. -v3: Fix changelog mistake Reported-by: Stephen Rothwell Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Xiao Guangrong Cc: Paul Mackerras LKML-Reference: <1268697902-9518-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 7645fae..60398a0 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1702,6 +1702,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } +#ifdef CONFIG_EVENT_TRACING void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) { regs->ip = ip; @@ -1713,4 +1714,4 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski regs->cs = __KERNEL_CS; local_save_flags(regs->flags); } -EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); +#endif diff --git a/kernel/perf_event.c b/kernel/perf_event.c index fb3031c..574ee58 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,10 +2786,12 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +#ifdef CONFIG_EVENT_TRACING __weak void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) { } +#endif /* * Output diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 0709e4f..7d79a10 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -12,6 +12,8 @@ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); + static char *perf_trace_buf; static char *perf_trace_buf_nmi; -- cgit v0.10.2
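For reference, the weak/strong resolution that makes the export placement awkward can be seen with plain GCC weak symbols; the sketch below is a userspace analogy only, since the kernel's EXPORT_SYMBOL_GPL() side has no userspace equivalent:

/* weak_demo.c: weak default definition, optionally overridden at link time */
#include <stdio.h>

/* generic fallback, analogous to the __weak copy in kernel/perf_event.c */
__attribute__((weak)) void fetch_caller_regs(void)
{
        puts("generic weak implementation");
}

int main(void)
{
        /*
         * Linked on its own, the weak definition above runs.  If another
         * object file provides a plain (strong) fetch_caller_regs(), the
         * linker silently picks that one instead, which is how the x86
         * version overrides the generic one.
         */
        fetch_caller_regs();
        return 0;
}

Since either the generic weak copy or an arch override can be the definition the kernel ends up linking, attaching the export to one particular definition leaves the other configuration unexported for modules; keeping the single EXPORT_SYMBOL_GPL() next to the trace-event code that actually uses the function avoids that, which is the rule the changelog spells out.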