author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-02 20:16:55 (GMT)
committer  Ingo Molnar <mingo@elte.hu>                2010-03-10 12:22:29 (GMT)
commit     34538ee77b39a12702e0f4c3ed9e8fa2dd5eb92c
parent     aff3d91a913c9ae0c2f56b65b27cbd00c7d27ee3
perf, x86: Use unlocked bitops
There is no concurrency on these variables, so don't use LOCK'ed ops.
As for the intel_pmu_handle_irq() status bit clearing: nobody uses it, so
remove it altogether.
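
For background: the kernel's set_bit()/clear_bit() are atomic read-modify-write
operations (LOCK-prefixed on x86), while the double-underscore __set_bit() and
__clear_bit() variants compile to plain, unlocked instructions. The changelog
notes there is no concurrency on these bitmaps (used_mask is local to the
scheduling pass, and active_mask is per-CPU state), so the cheaper unlocked
forms are sufficient. Below is a minimal userspace sketch of that distinction,
not kernel code: the bitmap_* helper names are invented for illustration, and
the atomic variant assumes the GCC/Clang __atomic builtins.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Atomic analogue of set_bit(): a locked read-modify-write, safe even if
 * other threads/CPUs modify the bitmap concurrently, but more expensive. */
static void bitmap_set_bit_atomic(unsigned long *map, unsigned int nr)
{
	__atomic_fetch_or(&map[nr / BITS_PER_LONG],
			  1UL << (nr % BITS_PER_LONG), __ATOMIC_SEQ_CST);
}

/* Unlocked analogue of __set_bit(): a plain OR, correct only when nothing
 * else can touch the bitmap at the same time. */
static void bitmap_set_bit(unsigned long *map, unsigned int nr)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Unlocked analogue of __clear_bit(). */
static void bitmap_clear_bit(unsigned long *map, unsigned int nr)
{
	map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
	unsigned long used_mask[2] = { 0, 0 };

	bitmap_set_bit(used_mask, 3);		/* like __set_bit()   */
	bitmap_set_bit_atomic(used_mask, 70);	/* like set_bit()     */
	bitmap_clear_bit(used_mask, 3);		/* like __clear_bit() */

	printf("%lx %lx\n", used_mask[1], used_mask[0]);
	return 0;
}

On x86 the generated code differs only by the LOCK prefix on the OR/AND
instruction, but on a hot path such as event scheduling that prefix is the
part worth avoiding when, as here, there is no concurrency to protect against.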
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.240023029@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2dd704f..01b1667 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (test_bit(hwc->idx, used_mask))
 			break;
 
-		set_bit(hwc->idx, used_mask);
+		__set_bit(hwc->idx, used_mask);
 		if (assign)
 			assign[i] = hwc->idx;
 	}
@@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (j == X86_PMC_IDX_MAX)
 			break;
 
-		set_bit(j, used_mask);
+		__set_bit(j, used_mask);
 
 		if (assign)
 			assign[i] = j;
@@ -842,7 +842,7 @@ void hw_perf_enable(void)
 			 * clear active_mask and events[] yet it preserves
			 * idx
			 */
-			set_bit(hwc->idx, cpuc->active_mask);
+			__set_bit(hwc->idx, cpuc->active_mask);
 			cpuc->events[hwc->idx] = event;
 
 			x86_pmu.enable(event);
@@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event)
 	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
-	clear_bit(idx, cpuc->active_mask);
+	__clear_bit(idx, cpuc->active_mask);
 	x86_pmu.disable(event);
 
 	/*