From 6c0aca288e726405b01dacb12cac556454d34b2a Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 11 Nov 2010 21:18:43 +0100
Subject: x86: Ignore trap bits on single step exceptions

When a single step exception fires, the trap bits, used to signal
hardware breakpoints, are in a random state.

These trap bits might be set if another exception is going to follow,
like a breakpoint in the next instruction, or a watchpoint in the
previous one. Or there can be any junk there.

So if we handle these trap bits during the single step exception, we
are going to handle an exception twice, or we are going to handle
junk. Just ignore them in this case.

This fixes https://bugzilla.kernel.org/show_bug.cgi?id=21332

Reported-by: Michael Stefaniuc
Signed-off-by: Frederic Weisbecker
Cc: Rafael J. Wysocki
Cc: Maciej Rutecki
Cc: Alexandre Julliard
Cc: Jason Wessel
Cc: All since 2.6.33.x

diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9d..42c5942 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 	dr6_p = (unsigned long *)ERR_PTR(args->err);
 	dr6 = *dr6_p;
 
+	/* If it's a single step, TRAP bits are random */
+	if (dr6 & DR_STEP)
+		return NOTIFY_DONE;
+
 	/* Do an early return if no trap bits are set in DR6 */
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
--
cgit v0.10.2

From 3c502e7a0255d82621ff25d60cc816624830497e Mon Sep 17 00:00:00 2001
From: Jason Wessel
Date: Thu, 4 Nov 2010 17:33:01 -0500
Subject: perf,hw_breakpoint: Initialize hardware API earlier

When using early debugging, the kernel does not initialize the
hw_breakpoint API early enough, which causes the late initialization
of the kernel debugger to fail. The boot arguments are:

    earlyprintk=vga ekgdboc=kbd kgdbwait

Then simply type "go" at the kdb prompt and boot. The kernel will
later emit the message:

    kgdb: Could not allocate hwbreakpoints

And at that point the kernel debugger will cease to work correctly.

The solution is to initialize the hw_breakpoint API at the same time
that all the other perf callbacks are initialized, instead of using a
core_initcall() initialization, which happens well after the kernel
debugger can make use of hardware breakpoints.
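
To make the ordering problem concrete, here is a minimal sketch of the
kind of early hw_breakpoint user that kgdb represents. It assumes the
2.6.37-era API (hw_breakpoint_init() plus the two-argument
register_wide_hw_breakpoint()); names ending in _sketch are
illustrative, not taken from kgdb:

#include <linux/err.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static struct perf_event * __percpu *bp_sketch;

/*
 * Until init_hw_breakpoint() has set up the per-CPU slot accounting,
 * this registration fails and a kgdb-style caller prints
 * "Could not allocate hwbreakpoints".
 */
static int early_bp_sketch(unsigned long addr)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);
        attr.bp_addr = addr;
        attr.bp_len  = HW_BREAKPOINT_LEN_1;
        attr.bp_type = HW_BREAKPOINT_W;

        bp_sketch = register_wide_hw_breakpoint(&attr, NULL);
        if (IS_ERR((void * __force)bp_sketch))
                return PTR_ERR((void * __force)bp_sketch);
        return 0;
}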
Signed-off-by: Jason Wessel
CC: Frederic Weisbecker
CC: Ingo Molnar
CC: Peter Zijlstra
LKML-Reference: <4CD3396D.1090308@windriver.com>
Signed-off-by: Frederic Weisbecker

diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a2d6ea4..d1e55fe 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -33,6 +33,8 @@ enum bp_type_idx {
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 
+extern int __init init_hw_breakpoint(void);
+
 static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 {
 	memset(attr, 0, sizeof(*attr));
@@ -108,6 +110,8 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
 
 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
 
+static inline int __init init_hw_breakpoint(void) { return 0; }
+
 static inline struct perf_event *
 register_user_hw_breakpoint(struct perf_event_attr *attr,
 			    perf_overflow_handler_t triggered,

diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 2c9120f..e532582 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -620,7 +620,7 @@ static struct pmu perf_breakpoint = {
 	.read = hw_breakpoint_pmu_read,
 };
 
-static int __init init_hw_breakpoint(void)
+int __init init_hw_breakpoint(void)
 {
 	unsigned int **task_bp_pinned;
 	int cpu, err_cpu;
@@ -655,6 +655,5 @@ static int __init init_hw_breakpoint(void)
 
 	return -ENOMEM;
 }
-core_initcall(init_hw_breakpoint);

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827..05b7d8c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include <linux/hw_breakpoint.h>
 #include
@@ -6295,6 +6296,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+	int ret;
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent);
@@ -6302,4 +6305,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+
+	ret = init_hw_breakpoint();
+	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
--
cgit v0.10.2

From 0e2af2a9abf94b408ff70679b692a8644fed4aab Mon Sep 17 00:00:00 2001
From: Rakib Mullick
Date: Fri, 12 Nov 2010 09:50:54 -0500
Subject: x86, hw_nmi: Move backtrace_mask declaration under ARCH_HAS_NMI_WATCHDOG
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

backtrace_mask is only used by code inside the ARCH_HAS_NMI_WATCHDOG
guard, so move its declaration under that guard. We were warned by
the following warning:

    arch/x86/kernel/apic/hw_nmi.c:21: warning: ‘backtrace_mask’ defined but not used

Signed-off-by: Rakib Mullick
Signed-off-by: Don Zickus
LKML-Reference: <1289573455-3410-2-git-send-email-dzickus@redhat.com>
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index cefd694..62f6e1e 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -17,15 +17,16 @@
 #include
 #include
 
-/* For reliability, we're prepared to waste bits here. */
-static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
-
 u64 hw_nmi_get_sample_period(void)
 {
 	return (u64)(cpu_khz) * 1000 * 60;
 }
 
 #ifdef ARCH_HAS_NMI_WATCHDOG
+
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
 void arch_trigger_all_cpu_backtrace(void)
 {
 	int i;
--
cgit v0.10.2

From 8882135bcd332f294df5455747ea43ba9e6f77ad Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 9 Nov 2010 19:01:43 +0100
Subject: perf: Fix owner-list vs exit

Oleg noticed that a perf-fd keeping a reference on the creating task
leads to a few funny side effects.

There are two different aspects to this:

- kernel based perf-events: these should not take out a reference on
  the creating task and appear on the task's event list, since they
  are not bound to fds nor visible to userspace.

- fork() and pthread_create(): these can lead to the creating task
  dying (and thus the task's event-list becoming useless) but keeping
  the list and reference alive until the event is closed.

Combined they lead to malfunction of the ptrace hw_tracepoints.

Cure this by not considering kernel based perf_events for the
owner-list and destroying the owner-list when the owner dies.

Reported-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra
Acked-by: Oleg Nesterov
LKML-Reference: <1289576883.2084.286.camel@laptop>
Signed-off-by: Ingo Molnar

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index f818d9d..671f6c8 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2235,11 +2235,6 @@ int perf_event_release_kernel(struct perf_event *event)
 	raw_spin_unlock_irq(&ctx->lock);
 	mutex_unlock(&ctx->mutex);
 
-	mutex_lock(&event->owner->perf_event_mutex);
-	list_del_init(&event->owner_entry);
-	mutex_unlock(&event->owner->perf_event_mutex);
-	put_task_struct(event->owner);
-
 	free_event(event);
 
 	return 0;
@@ -2252,9 +2247,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
 	struct perf_event *event = file->private_data;
+	struct task_struct *owner;
 
 	file->private_data = NULL;
 
+	rcu_read_lock();
+	owner = ACCESS_ONCE(event->owner);
+	/*
+	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+	 * !owner it means the list deletion is complete and we can indeed
+	 * free this event, otherwise we need to serialize on
+	 * owner->perf_event_mutex.
+	 */
+	smp_read_barrier_depends();
+	if (owner) {
+		/*
+		 * Since delayed_put_task_struct() also drops the last
+		 * task reference we can safely take a new reference
+		 * while holding the rcu_read_lock().
+		 */
+		get_task_struct(owner);
+	}
+	rcu_read_unlock();
+
+	if (owner) {
+		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * We have to re-check the event->owner field, if it is cleared
+		 * we raced with perf_event_exit_task(), acquiring the mutex
+		 * ensured they're done, and we can proceed with freeing the
+		 * event.
+		 */
+		if (event->owner)
+			list_del_init(&event->owner_entry);
+		mutex_unlock(&owner->perf_event_mutex);
+		put_task_struct(owner);
+	}
+
 	return perf_event_release_kernel(event);
 }
@@ -5678,7 +5707,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&ctx->mutex);
 
 	event->owner = current;
-	get_task_struct(current);
+
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -5746,12 +5775,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);
 
-	event->owner = current;
-	get_task_struct(current);
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
-
 	return event;
 
 err_free:
@@ -5902,8 +5925,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+	struct perf_event *event, *tmp;
 	int ctxn;
 
+	mutex_lock(&child->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+				 owner_entry) {
+		list_del_init(&event->owner_entry);
+
+		/*
+		 * Ensure the list deletion is visible before we clear
+		 * the owner, closes a race against perf_release() where
+		 * we need to serialize on the owner->perf_event_mutex.
+		 */
+		smp_wmb();
+		event->owner = NULL;
+	}
+	mutex_unlock(&child->perf_event_mutex);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
 }
--
cgit v0.10.2

From 94e8ba728640dc01375a14e337f3b892bfacbeeb Mon Sep 17 00:00:00 2001
From: Sergio Aguirre
Date: Tue, 16 Nov 2010 12:02:47 -0600
Subject: irq_work: Drop cmpxchg() result

The compiler warned us about:

    kernel/irq_work.c: In function 'irq_work_run':
    kernel/irq_work.c:148: warning: value computed is not used

Dropping the cmpxchg() result is indeed weird, but correct - so
annotate away the warning.

Signed-off-by: Sergio Aguirre
Cc: Huang Ying
Cc: Martin Schwidefsky
Cc: Kyle McMartin
Signed-off-by: Peter Zijlstra
LKML-Reference: <1289930567-17828-1-git-send-email-saaguirre@ti.com>
Signed-off-by: Ingo Molnar

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index f16763f..90f8819 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -145,7 +145,9 @@ void irq_work_run(void)
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
+		(void)cmpxchg(&entry->next,
+			      next_flags(NULL, IRQ_WORK_BUSY),
+			      NULL);
 	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
--
cgit v0.10.2

From de31ec8a31046111befd16a7083e3bdda2ff42cf Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Thu, 18 Nov 2010 19:16:55 +0900
Subject: x86/kprobes: Prevent kprobes from probing save_args()

Prevent kprobes from probing save_args(), since this function is
called from the breakpoint exception handler; probing it would cause
an infinite loop in breakpoint handling.
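
The fix works because kprobes refuses to place probes inside the
.kprobes.text section. A rough sketch of that blacklist check, modeled
on the kernel's in_kprobes_functions() (treat the exact form as an
approximation):

/* Linker-provided bounds of the .kprobes.text section */
extern char __kprobes_text_start[], __kprobes_text_end[];

/*
 * register_kprobe() rejects any address inside .kprobes.text, so
 * placing save_args() there makes it unprobeable and breaks the
 * int3 -> save_args -> int3 recursion described above.
 */
static int in_kprobes_functions_sketch(unsigned long addr)
{
        if (addr >= (unsigned long)__kprobes_text_start &&
            addr <  (unsigned long)__kprobes_text_end)
                return -EINVAL;
        return 0;
}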
Signed-off-by: Masami Hiramatsu
Cc: 2nddept-manager@sdl.hitachi.co.jp
Cc: Ananth N Mavinakayanahalli
LKML-Reference: <20101118101655.2779.2816.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fe2690d..e3ba417 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 /* save partial stack frame */
+	.pushsection .kprobes.text, "ax"
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
@@ -334,6 +335,7 @@ ENTRY(save_args)
 	ret
 	CFI_ENDPROC
 END(save_args)
+	.popsection
 
 ENTRY(save_rest)
 	PARTIAL_FRAME 1 REST_SKIP+8
--
cgit v0.10.2

From c1a3a4b90a5a47adcca0e587f5d7e9ea61329b26 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Mon, 22 Nov 2010 14:01:55 -0200
Subject: perf record: Handle restrictive permissions in /proc/{kallsyms,modules}

The 59365d1 commit, even though it was reverted by 33e0d57, exposed
non-robust behavior in 'perf record': it really should just warn the
user that some functionality will not be available.

The new behavior then becomes:

[acme@felicio linux]$ ls -la /proc/{kallsyms,modules}
-r-------- 1 root root 0 Nov 22 12:19 /proc/kallsyms
-r-------- 1 root root 0 Nov 22 12:19 /proc/modules
[acme@felicio linux]$ perf record ls -R > /dev/null
Couldn't record kernel reference relocation symbol
Symbol resolution may be skewed if relocation was used (e.g. kexec).
Check /proc/kallsyms permission or run as root.
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.004 MB perf.data (~161 samples) ]
[acme@felicio linux]$ perf report --stdio
[kernel.kallsyms] with build id 77b05e00e64e4de1c9347d83879779b540d69f00 not found, continuing without symbols
# Events: 98  cycles
#
# Overhead  Command    Shared Object            Symbol
# ........  .......  ...............  ....................
#
    48.26%       ls  [kernel]         [k] ffffffff8102b92b
    22.49%       ls  libc-2.12.90.so  [.] __strlen_sse2
     8.35%       ls  libc-2.12.90.so  [.] __GI___strcoll_l
     8.17%       ls  ls               [.] 11580
     3.35%       ls  libc-2.12.90.so  [.] _IO_new_file_xsputn
     3.33%       ls  libc-2.12.90.so  [.] _int_malloc
     1.88%       ls  libc-2.12.90.so  [.] _int_free
     0.84%       ls  libc-2.12.90.so  [.] malloc_consolidate
     0.84%       ls  libc-2.12.90.so  [.] __readdir64
     0.83%       ls  ls               [.] strlen@plt
     0.83%       ls  libc-2.12.90.so  [.] __GI_fwrite_unlocked
     0.83%       ls  libc-2.12.90.so  [.] __memcpy_sse2
#
# (For a higher level overview, try: perf report --sort comm,dso)
#
[acme@felicio linux]$

It still has the build-ids for DSOs in the maps with hits:

[acme@felicio linux]$ perf buildid-list
77b05e00e64e4de1c9347d83879779b540d69f00 [kernel.kallsyms]
09c4a431a4a8b648fcfc2c2bdda70f56050ddff1 /bin/ls
af75ea9ad951d25e0f038901a11b3846dccb29a4 /lib64/libc-2.12.90.so
[acme@felicio linux]$

That can be used in another machine to resolve kernel symbols.
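
perf reads its kernel symbol table from /proc/kallsyms, so when that
file is mode 0400 as in the transcript above, an unprivileged run can
only warn and continue. A minimal userspace sketch of such an up-front
check; illustrative, not code from perf itself:

#include <stdio.h>
#include <unistd.h>

/* Returns 1 if kernel symbol resolution is possible, 0 otherwise. */
static int kallsyms_readable(void)
{
        if (access("/proc/kallsyms", R_OK) < 0) {
                fprintf(stderr,
                        "Check /proc/kallsyms permission or run as root.\n");
                return 0;
        }
        return 1;
}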
Cc: Eugene Teo
Cc: Frederic Weisbecker
Cc: Ingo Molnar
Cc: Jesper Juhl
Cc: Marcus Meissner
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Peter Zijlstra
Cc: Sarah Sharp
Cc: Stephane Eranian
Cc: Tejun Heo
Cc: Tom Zanussi
LKML-Reference:
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 93bd2ff..e2c2de2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -697,17 +697,18 @@ static int __cmd_record(int argc, const char **argv)
 	if (err < 0)
 		err = event__synthesize_kernel_mmap(process_synthesized_event,
 						    session, machine, "_stext");
-	if (err < 0) {
-		pr_err("Couldn't record kernel reference relocation symbol.\n");
-		return err;
-	}
+	if (err < 0)
+		pr_err("Couldn't record kernel reference relocation symbol\n"
+		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+		       "Check /proc/kallsyms permission or run as root.\n");
 
 	err = event__synthesize_modules(process_synthesized_event,
 					session, machine);
-	if (err < 0) {
-		pr_err("Couldn't record kernel reference relocation symbol.\n");
-		return err;
-	}
+	if (err < 0)
+		pr_err("Couldn't record kernel module information.\n"
+		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+		       "Check /proc/modules permission or run as root.\n");
+
 	if (perf_guest)
 		perf_session__process_machines(session,
 					       event__synthesize_guest_os);
--
cgit v0.10.2

From 02a9d03772aa1ff33a26180a2da0bfb191240eda Mon Sep 17 00:00:00 2001
From: Rabin Vincent
Date: Tue, 23 Nov 2010 22:08:18 +0530
Subject: perf symbols: Remove incorrect open-coded container_of()

At least on ARM, padding is inserted between rb_node and sym in struct
symbol_name_rb_node, causing "((void *)sym) - sizeof(struct rb_node)"
to point inside rb_node rather than to the symbol_name_rb_node. Fix
this by converting the code to use container_of().

Cc: Ian Munsie
Cc: Ingo Molnar
Cc: Ming Lei
Cc: Paul Mackerras
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Tom Zanussi
LKML-Reference: <20101123163106.GA25677@debian>
Signed-off-by: Rabin Vincent
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b39f499..0500895 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -295,7 +295,9 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
 {
 	struct rb_node **p = &self->rb_node;
 	struct rb_node *parent = NULL;
-	struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+	struct symbol_name_rb_node *symn, *s;
+
+	symn = container_of(sym, struct symbol_name_rb_node, sym);
 
 	while (*p != NULL) {
 		parent = *p;
--
cgit v0.10.2

From dddd3379a619a4cb8247bfd3c94ca9ae3797aa2e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 Nov 2010 10:05:55 +0100
Subject: perf: Fix inherit vs. context rotation bug

It was found that sometimes children of tasks with inherited events
had one extra event. Eventually it turned out to be due to the list
rotation not being exclusive with the list iteration in the
inheritance code.

Cure this by temporarily disabling the rotation while we inherit the
events.
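
For reference, the rotation being excluded is list_rotate_left(),
which moves the list head's first entry to the tail and thereby
round-robins the flexible groups each timer tick; since it runs from
interrupt context it can race with a sleeping iteration of the same
list. Its include/linux/list.h implementation, as I recall it from
this era:

/* Rotate the list: move the first entry to the tail. */
static inline void list_rotate_left(struct list_head *head)
{
        struct list_head *first;

        if (!list_empty(head)) {
                first = head->next;
                list_move_tail(first, head);
        }
}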
Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra
LKML-Reference:
Cc:
Signed-off-by: Ingo Molnar

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 40150f3..142e3d604 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -850,6 +850,7 @@ struct perf_event_context {
 	int			nr_active;
 	int			is_active;
 	int			nr_stat;
+	int			rotate_disable;
 	atomic_t		refcount;
 	struct task_struct	*task;

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 671f6c8..f365dd8 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1622,8 +1622,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
@@ -6162,6 +6166,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp[ctxn] = NULL;
@@ -6202,6 +6207,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
@@ -6209,6 +6223,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp[ctxn];
 
 	if (child_ctx && inherited_all) {
--
cgit v0.10.2

From 33c6d6a7ad0ffab9b1b15f8e4107a2af072a05a0 Mon Sep 17 00:00:00 2001
From: Don Zickus
Date: Mon, 22 Nov 2010 16:55:23 -0500
Subject: x86, perf, nmi: Disable perf if counters are not accessible

In KVM guests, the perf counters are not emulated; they return zero
on a rdmsrl. The perf NMI handler uses the fact that a counter
crossing zero means the counter overflowed (for those counters that
do not have specific interrupt bits). Therefore, on KVM guests perf
will swallow all NMIs, thinking the counters overflowed.

This causes problems for subsystems like kgdb which need NMIs to do
their magic. This problem was discovered by running kgdb tests.

The solution is to write a known value into a perf counter during
initialization and read it back. On KVM guests the value reads back
as zero, and we disable perf as a result.
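
To see why a counter that always reads zero swallows NMIs: counters
are programmed to a negative value and count up, so the generic x86
handler treats a cleared sign bit as "crossed zero, hence overflowed".
A condensed sketch of that decision, with field names approximating
the 2.6.37-era struct x86_pmu:

/*
 * Sketch of the overflow test in x86_pmu_handle_irq(). A KVM counter
 * that always returns 0 has its sign bit permanently clear, so it
 * looks permanently overflowed and the handler claims every NMI.
 */
static int pmu_counter_overflowed_sketch(int idx)
{
        u64 val;

        rdmsrl(x86_pmu.perfctr + idx, val);
        if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                return 0;       /* sign bit still set: no overflow */
        return 1;               /* overflow assumed; NMI is consumed */
}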
Reported-by: Jason Wessel
Patch-inspired-by: Peter Zijlstra
Signed-off-by: Don Zickus
Signed-off-by: Peter Zijlstra
Cc: Stephane Eranian
LKML-Reference: <1290462923-30734-1-git-send-email-dzickus@redhat.com>
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index ed63101..6d75b91 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -381,6 +381,20 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
+static bool check_hw_exists(void)
+{
+	u64 val, val_new = 0;
+	int ret = 0;
+
+	val = 0xabcdUL;
+	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	if (ret || val != val_new)
+		return false;
+
+	return true;
+}
+
 static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
@@ -1372,6 +1386,12 @@ void __init init_hw_perf_events(void)
 
 	pmu_check_apic();
 
+	/* sanity check that the hardware exists or is emulated */
+	if (!check_hw_exists()) {
+		pr_cont("Broken PMU hardware detected, software events only.\n");
+		return;
+	}
+
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
 	if (x86_pmu.quirks)
--
cgit v0.10.2

From cc2067a51424dd25c10c1b1230b4222d8baec94d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 16 Nov 2010 21:49:01 +0100
Subject: perf, x86: Fixup Kconfig deps

There is a Kconfig dependency inversion: x86 selects PERF_EVENTS (due
to a hw_breakpoint dependency) but doesn't unconditionally provide
HAVE_PERF_EVENTS. (This can cause build failures on M386/M486 kernel
.config's.)

Signed-off-by: Peter Zijlstra
LKML-Reference: <20101117222055.982965150@chello.nl>
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e832768..e330da2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_PERF_EVENTS if (!M386 && !M486)
+	select HAVE_PERF_EVENTS
 	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
--
cgit v0.10.2

From ee6dcfa40a50fe12a3ae0fb4d2653c66c3ed6556 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 26 Nov 2010 13:49:04 +0100
Subject: perf: Fix the software context switch counter

Stephane noticed that because the perf_sw_event() call is inside the
perf_event_task_sched_out() call it won't get called unless we have a
per-task counter.
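
The reason the call was skipped: the inline wrapper gates the whole
__perf_event_task_sched_out() call behind perf_task_events, which is
zero unless per-task events exist. The semantics of that gate, sketched
in its fallback form (the real COND_STMT() is optimized with jump
labels where available):

/*
 * Sketch of the pre-fix behavior: the context-switch software event
 * lived inside the gated call, so it was skipped whenever no per-task
 * event kept perf_task_events nonzero.
 */
static inline void
perf_event_task_sched_out_sketch(struct task_struct *task,
				 struct task_struct *next)
{
	if (unlikely(atomic_read(&perf_task_events)))
		__perf_event_task_sched_out(task, next);
}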
Reported-by: Stephane Eranian
Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 142e3d604..de2c417 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -909,20 +909,6 @@ extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
 extern void __perf_event_task_sched_in(struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-
-extern atomic_t perf_task_events;
-
-static inline void perf_event_task_sched_in(struct task_struct *task)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
-}
-
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
-}
-
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1031,6 +1017,21 @@ have_event:
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index f365dd8..eac7e33 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1287,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
 	int ctxn;
 
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
 	for_each_task_context_nr(ctxn)
 		perf_event_context_sched_out(task, ctxn, next);
 }
--
cgit v0.10.2
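
A quick way to exercise the fixed counter from userspace is a CPU-wide
software counter, which is exactly the case that previously missed
context switches (a per-task counter kept the gate open and still
counted). An illustrative standalone program, with error handling
trimmed; the CPU-wide open (pid = -1) may require root depending on
perf_event_paranoid:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_SW_CONTEXT_SWITCHES,
	};
	uint64_t count;
	/* count context switches on CPU 0, for any task */
	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);

	if (fd < 0)
		return 1;
	sleep(1);	/* let some switches happen */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("context switches on CPU0: %llu\n",
		       (unsigned long long)count);
	return 0;
}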