author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-06-22 14:35:24 (GMT)
committer | Ingo Molnar <mingo@elte.hu> | 2009-06-25 19:39:06 (GMT)
commit | 194002b274e9169a04beb1b23dcc132159bb566c (patch)
tree | 6977e48df4a1429dae999d4afe23e5e28febc99e
parent | 7f8b4e4e0988dadfd22330fd147ad2453e19f510 (diff)
download | linux-194002b274e9169a04beb1b23dcc132159bb566c.tar.xz
perf_counter, x86: Add mmap counter read support
Update the mmap control page with the information needed to
use the userspace RDPMC instruction for self-monitoring.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
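For context, the sketch below shows how a self-monitoring task is expected to consume the fields this patch keeps up to date; it is not part of the patch. The local struct only mirrors the lock/index/offset fields of struct perf_counter_mmap_page that the diff touches, and the assumption that this control page sits at the start of the counter's read-only mmap area comes from the perf_counter ABI of this series rather than from this commit. On x86 (PERF_COUNTER_INDEX_OFFSET == 0) a non-zero index means the counter is live on the PMU and RDPMC register index - 1 reads it; a zero index means the counter is not currently scheduled and only offset is valid.

```c
/*
 * Sketch of a userspace self-monitoring read, assuming the counter fd has
 * already been mmap()ed and "pc" points at the first page (the control page
 * this patch updates). The field names follow the kernel's
 * struct perf_counter_mmap_page; the exact layout is an assumption here.
 */
#include <stdint.h>

struct mmap_page {		/* mirrors struct perf_counter_mmap_page */
	uint32_t version;
	uint32_t compat_version;
	uint32_t lock;		/* seqlock-style count, bumped around updates */
	uint32_t index;		/* hw idx + 1 - PERF_COUNTER_INDEX_OFFSET, 0 if inactive */
	int64_t  offset;	/* add to the hw counter value to get the total count */
};

#define barrier()	asm volatile("" ::: "memory")

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (uint64_t)hi << 32 | lo;
}

/* Read the current count without entering the kernel. */
static uint64_t mmap_read_self(volatile struct mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		barrier();

		idx = pc->index;
		count = pc->offset;
		if (idx)				/* counter is live on the PMU */
			count += rdpmc(idx - 1);	/* x86: INDEX_OFFSET == 0 */

		barrier();
	} while (pc->lock != seq);	/* retry if the kernel updated the page */

	return count;
}
```

Note that executing RDPMC from user mode also requires CR4.PCE to be set; arranging that is outside the scope of this patch.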
-rw-r--r-- | arch/powerpc/include/asm/perf_counter.h | 2
-rw-r--r-- | arch/x86/include/asm/perf_counter.h | 3
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 6
-rw-r--r-- | kernel/perf_counter.c | 10

4 files changed, 20 insertions, 1 deletion
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e1..0ea0639 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
+#define PERF_COUNTER_INDEX_OFFSET	1
+
 /*
  * Only override the default definitions in include/linux/perf_counter.h
  * if we have hardware PMU support.
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 5fb33e1..fa64e40 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -87,6 +87,9 @@ union cpuid10_edx {
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET	0
+
 #else
 static inline void init_hw_perf_counters(void)		{ }
 static inline void perf_counters_lapic_init(void)	{ }
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a310d19..b83474b 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	err = checking_wrmsrl(hwc->counter_base + idx,
 			     (u64)(-left) & x86_pmu.counter_mask);
 
+	perf_counter_update_userpage(counter);
+
 	return ret;
 }
 
@@ -1034,6 +1036,8 @@ try_generic:
 	x86_perf_counter_set_period(counter, hwc, idx);
 	x86_pmu.enable(hwc, idx);
 
+	perf_counter_update_userpage(counter);
+
 	return 0;
 }
 
@@ -1126,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	x86_perf_counter_update(counter, hwc, idx);
 	cpuc->counters[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
+
+	perf_counter_update_userpage(counter);
 }
 
 /*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 23614ad..02994a7 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1753,6 +1753,14 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+static int perf_counter_index(struct perf_counter *counter)
+{
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return 0;
+
+	return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -1777,7 +1785,7 @@ void perf_counter_update_userpage(struct perf_counter *counter)
 	preempt_disable();
 	++userpg->lock;
 	barrier();
-	userpg->index = counter->hw.idx;
+	userpg->index = perf_counter_index(counter);
 	userpg->offset = atomic64_read(&counter->count);
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		userpg->offset -= atomic64_read(&counter->hw.prev_count);
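To make the read sketch above concrete, the counter still has to be opened on the current task and its control page mapped. The names used below (__NR_perf_counter_open, struct perf_counter_attr, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES) are assumptions based on the perf_counter ABI of this kernel series, not definitions introduced by this patch, so treat this as a hypothetical setup rather than a reference.

```c
/*
 * Hypothetical setup for the self-monitoring read loop: open a counter on
 * the calling task and map its control page. All perf_counter names here
 * are assumptions about the ABI of this series, not part of this patch.
 */
#include <linux/perf_counter.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_counter_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};
	long page_size = sysconf(_SC_PAGESIZE);
	void *base;
	int fd;

	/* counter on the calling task (pid 0), any CPU (-1), no group (-1) */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* the first page of the mapping is the control page updated by the kernel */
	base = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return 1;

	/* ... code under measurement, bracketed by mmap_read_self(base) ... */

	munmap(base, page_size);
	close(fd);
	return 0;
}
```

Mapping a single page exposes only the control page; a larger 1+2^n page mapping would additionally expose the event buffer, which this example does not need.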