author    Peter Zijlstra <peterz@infradead.org>  2015-05-21 08:57:32 (GMT)
committer Ingo Molnar <mingo@kernel.org>  2015-05-27 07:17:44 (GMT)
commit    0c41e756b9c5a9899b5cd238226600f8f34c9b82 (patch)
tree      49f4bfb7a30d3337c95fd1a6127eb9549ebe70e1 /arch
parent    17186ccda374ae02ef231cbbc8f1825e7c19ddbd (diff)
download  linux-0c41e756b9c5a9899b5cd238226600f8f34c9b82.tar.xz
perf/x86/intel: Clean up intel_commit_scheduling() placement
Move the code of intel_commit_scheduling() to the right place, which is
in between start() and stop().

No change in functionality.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
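For context, the three callbacks are used in start -> commit -> stop order by the
generic x86 event scheduler, which is why declaring commit_scheduling() between
start_scheduling() and stop_scheduling() mirrors their use. Below is a minimal,
self-contained user-space C sketch of that calling pattern; the struct names and
demo_schedule_events() driver are illustrative stand-ins, not the kernel's actual
x86_schedule_events() implementation.

#include <stdio.h>

/* Stand-in for struct cpu_hw_events; only what the sketch needs. */
struct cpu_hw_events_demo {
	int n_events;
};

/*
 * Callback table mirroring the x86_pmu ordering after this patch:
 * start_scheduling(), then commit_scheduling(), then stop_scheduling().
 */
struct pmu_sched_ops {
	void (*start_scheduling)(struct cpu_hw_events_demo *cpuc);
	void (*commit_scheduling)(struct cpu_hw_events_demo *cpuc, int idx, int cntr);
	void (*stop_scheduling)(struct cpu_hw_events_demo *cpuc);
};

static void demo_start(struct cpu_hw_events_demo *cpuc)
{
	printf("start_scheduling()\n");
}

static void demo_commit(struct cpu_hw_events_demo *cpuc, int idx, int cntr)
{
	printf("commit_scheduling(idx=%d, cntr=%d)\n", idx, cntr);
}

static void demo_stop(struct cpu_hw_events_demo *cpuc)
{
	printf("stop_scheduling()\n");
}

/* Hypothetical driver loop: start once, commit each assigned event, stop once. */
static void demo_schedule_events(struct pmu_sched_ops *ops,
				 struct cpu_hw_events_demo *cpuc)
{
	int idx;

	if (ops->start_scheduling)
		ops->start_scheduling(cpuc);

	for (idx = 0; idx < cpuc->n_events; idx++) {
		if (ops->commit_scheduling)
			ops->commit_scheduling(cpuc, idx, idx); /* cntr == idx in this demo */
	}

	if (ops->stop_scheduling)
		ops->stop_scheduling(cpuc);
}

int main(void)
{
	struct pmu_sched_ops ops = {
		.start_scheduling  = demo_start,
		.commit_scheduling = demo_commit,
		.stop_scheduling   = demo_stop,
	};
	struct cpu_hw_events_demo cpuc = { .n_events = 2 };

	demo_schedule_events(&ops, &cpuc);
	return 0;
}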
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h        |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  | 60
2 files changed, 32 insertions, 32 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index ef78516..e560952 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -527,10 +527,10 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
- void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
-
void (*start_scheduling)(struct cpu_hw_events *cpuc);
+ void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
+
void (*stop_scheduling)(struct cpu_hw_events *cpuc);
struct event_constraint *event_constraints;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d7d30b4..ff56fc3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1934,6 +1934,34 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
}
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct event_constraint *c = cpuc->event_constraint[idx];
+ struct intel_excl_states *xl;
+ int tid = cpuc->excl_thread_id;
+
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return;
+
+ if (WARN_ON_ONCE(!excl_cntrs))
+ return;
+
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
+ return;
+
+ xl = &excl_cntrs->states[tid];
+
+ lockdep_assert_held(&excl_cntrs->lock);
+
+ if (cntr >= 0) {
+ if (c->flags & PERF_X86_EVENT_EXCL)
+ xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+ else
+ xl->init_state[cntr] = INTEL_EXCL_SHARED;
+ }
+}
+
static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
@@ -2184,34 +2212,6 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
intel_put_excl_constraints(cpuc, event);
}
-static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
-{
- struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
- struct event_constraint *c = cpuc->event_constraint[idx];
- struct intel_excl_states *xl;
- int tid = cpuc->excl_thread_id;
-
- if (cpuc->is_fake || !is_ht_workaround_enabled())
- return;
-
- if (WARN_ON_ONCE(!excl_cntrs))
- return;
-
- if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
- return;
-
- xl = &excl_cntrs->states[tid];
-
- lockdep_assert_held(&excl_cntrs->lock);
-
- if (cntr >= 0) {
- if (c->flags & PERF_X86_EVENT_EXCL)
- xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
- else
- xl->init_state[cntr] = INTEL_EXCL_SHARED;
- }
-}
-
static void intel_pebs_aliases_core2(struct perf_event *event)
{
if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
@@ -2920,8 +2920,8 @@ static __init void intel_ht_bug(void)
{
x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
- x86_pmu.commit_scheduling = intel_commit_scheduling;
x86_pmu.start_scheduling = intel_start_scheduling;
+ x86_pmu.commit_scheduling = intel_commit_scheduling;
x86_pmu.stop_scheduling = intel_stop_scheduling;
}
@@ -3377,8 +3377,8 @@ static __init int fixup_ht_bug(void)
x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
- x86_pmu.commit_scheduling = NULL;
x86_pmu.start_scheduling = NULL;
+ x86_pmu.commit_scheduling = NULL;
x86_pmu.stop_scheduling = NULL;
watchdog_nmi_enable_all();