author     Robert Richter <robert.richter@amd.com>   2009-04-29 10:47:01 (GMT)
committer  Ingo Molnar <mingo@elte.hu>               2009-04-29 12:51:02 (GMT)
commit     4295ee62660b13ddb87d41539f49b239e6e7d56f (patch)
tree       bdd7c1b1c1b4c1c4b05d42d4837c6fbf8ad5e5f8 /arch
parent     4138960a9251a265002b5cf07e671a49f8495381 (diff)
download   linux-fsl-qoriq-4295ee62660b13ddb87d41539f49b239e6e7d56f.tar.xz
perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()
MSR reads and writes are expensive. This patch adds checks to avoid
them where possible.

[ Impact: micro-optimization on AMD CPUs ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
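The rework is easier to see in isolation. Below is a minimal user-space sketch of the check-before-write pattern the patch applies, assuming hypothetical rdmsr_mock()/wrmsr_mock() stand-ins for the kernel's rdmsrl()/wrmsrl(), a mock MSR array, and an illustrative active_mask value; it is not kernel code, just a demonstration of why the extra checks save writes.

/*
 * Sketch of the patch's idea: skip counters that are not active, and
 * skip the expensive MSR write when the enable bit already holds the
 * desired value. All names below are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS     4
#define EVNTSEL_ENABLE  (1ULL << 22)  /* enable bit, as in ARCH_PERFMON_EVENTSEL0_ENABLE */

static uint64_t mock_msr[NR_COUNTERS];   /* stands in for MSR_K7_EVNTSEL0 + idx */
static unsigned long active_mask = 0x5;  /* counters 0 and 2 are in use */
static int msr_writes;                   /* counts the "expensive" operations */

static uint64_t rdmsr_mock(int idx)             { return mock_msr[idx]; }
static void     wrmsr_mock(int idx, uint64_t v) { mock_msr[idx] = v; msr_writes++; }

static void save_disable_all(void)
{
        for (int idx = 0; idx < NR_COUNTERS; idx++) {
                uint64_t val;

                if (!(active_mask & (1UL << idx)))  /* analogue of test_bit() */
                        continue;
                val = rdmsr_mock(idx);
                if (!(val & EVNTSEL_ENABLE))        /* already disabled: no write */
                        continue;
                wrmsr_mock(idx, val & ~EVNTSEL_ENABLE);
        }
}

int main(void)
{
        mock_msr[0] = EVNTSEL_ENABLE;  /* only counter 0 actually needs a write */
        save_disable_all();
        printf("MSR writes performed: %d\n", msr_writes);  /* prints 1, not 4 */
        return 0;
}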
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d6d6529..75a0903 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void)
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;

+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}

 	return enabled;
@@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl)
 		return;

 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
+		u64 val;

-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 }
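Both hunks follow the same shape: counters not set in cpuc->active_mask are skipped without touching any MSR, and once the event-select register has been read, the wrmsrl() is skipped when the enable bit already holds the desired value. The early-continue style also flattens the nesting of the original loops.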