Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        | 20 ++++++--------------
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  |  2 +-
2 files changed, 7 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9757b96..b68c4fb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)
static void x86_pmu_unthrottle(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
-
- if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
- cpuc->events[hwc->idx] != event))
- return;
-
- x86_pmu.enable(event);
+ int ret = x86_pmu_start(event);
+ WARN_ON_ONCE(ret);
}
void perf_event_print_debug(void)
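The hunk above replaces the open-coded enable in x86_pmu_unthrottle() with a
call to x86_pmu_start(), warning once if the restart fails. A minimal
user-space sketch of that delegation pattern follows; all names here
(struct event, pmu_start, pmu_unthrottle) are invented stand-ins, not the
kernel's API:

/*
 * Unthrottle is now just "start again, and warn if that cannot work",
 * instead of duplicating the enable logic.
 */
#include <stdio.h>

struct event {
	int idx;	/* assigned counter, or -1 if unscheduled */
	int active;
};

/* Stands in for x86_pmu_start(): refuse events without a counter. */
static int pmu_start(struct event *e)
{
	if (e->idx < 0)
		return -1;	/* the kernel returns -EAGAIN here */
	e->active = 1;
	return 0;
}

/* Stands in for the rewritten x86_pmu_unthrottle(). */
static void pmu_unthrottle(struct event *e)
{
	if (pmu_start(e))
		fprintf(stderr, "WARN: unthrottle failed to restart event\n");
}

int main(void)
{
	struct event e = { .idx = 3, .active = 0 };

	pmu_unthrottle(&e);
	printf("active = %d\n", e.active);	/* prints: active = 1 */
	return 0;
}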
@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- /*
- * Must be done before we disable, otherwise the nmi handler
- * could reenable again:
- */
- __clear_bit(idx, cpuc->active_mask);
+ if (!__test_and_clear_bit(idx, cpuc->active_mask))
+ return;
+
x86_pmu.disable(event);
/*
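Here x86_pmu_stop() gains a __test_and_clear_bit() guard, so a second stop of
an already-stopped event returns early instead of disabling the counter twice.
A stand-alone sketch of that idempotent-stop idea; test_and_clear_bit() below
is a plain, non-atomic stand-in for the kernel's bitop:

#include <stdio.h>

static unsigned long active_mask;

static int test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long bit = 1UL << nr;
	int was_set = !!(*addr & bit);

	*addr &= ~bit;
	return was_set;
}

/* Stands in for x86_pmu_stop(). */
static void pmu_stop(int idx)
{
	if (!test_and_clear_bit(idx, &active_mask))
		return;		/* already stopped: nothing to disable */
	printf("disabling counter %d\n", idx);
}

int main(void)
{
	active_mask = 1UL << 5;		/* counter 5 is running */

	pmu_stop(5);	/* prints: disabling counter 5 */
	pmu_stop(5);	/* no output: the guard makes it a no-op */
	return 0;
}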
@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
continue;
if (perf_event_overflow(event, 1, &data, regs))
- x86_pmu.disable(event);
+ x86_pmu_stop(event);
}
if (handled)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d87421c..84bfde6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -774,7 +774,7 @@ again:
data.period = event->hw.last_period;
if (perf_event_overflow(event, 1, &data, regs))
- intel_pmu_disable_event(event);
+ x86_pmu_stop(event);
}
intel_pmu_ack_status(ack);
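
Both interrupt handlers now throttle an overflowing event through
x86_pmu_stop() rather than the raw disable callbacks, so the active-mask
bookkeeping is updated and the later unthrottle (now x86_pmu_start(), per the
first hunk) finds consistent state. A toy round trip of that pattern, again
with simplified stand-ins rather than the kernel code:

#include <stdio.h>

static unsigned long active_mask;

static void pmu_stop(int idx)		/* x86_pmu_stop() stand-in */
{
	unsigned long bit = 1UL << idx;

	if (!(active_mask & bit))
		return;
	active_mask &= ~bit;
	printf("counter %d throttled\n", idx);
}

static void pmu_start(int idx)		/* x86_pmu_start() stand-in */
{
	active_mask |= 1UL << idx;
	printf("counter %d running\n", idx);
}

int main(void)
{
	pmu_start(1);	/* event scheduled in */
	pmu_stop(1);	/* overflow handler throttles it */
	pmu_start(1);	/* unthrottle restarts it cleanly */
	return 0;
}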