author		Frederic Weisbecker <fweisbec@gmail.com>	2016-04-13 13:56:50 (GMT)
committer	Ingo Molnar <mingo@kernel.org>	2016-04-23 12:20:41 (GMT)
commit		cee1afce3053e7aa0793fbd5f2e845fa2cef9e33 (patch)
tree		6d85a5c5b124255c5babb024a3e83200813e0580 /kernel/sched/fair.c
parent		a2c6c91f98247fef0fe75216d607812485aeb0df (diff)
download	linux-cee1afce3053e7aa0793fbd5f2e845fa2cef9e33.tar.xz
sched/fair: Gather CPU load functions under a more conventional namespace
The CPU load update related functions currently have a weak naming
convention, starting with update_cpu_load_*(), which isn't ideal as
"update" is a very generic concept.

Since two of these functions are public already (and a third is to
come), that's enough to introduce a more conventional naming scheme.
So let's do the following rename instead:

	update_cpu_load_*() -> cpu_load_update_*()

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1460555812-25375-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
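For quick reference, the patch renames five functions in this file. Their
signatures, gathered from the hunks below into one hedged sketch (prototypes
assembled from this diff, not copied from a kernel header):

	/* Rename summary; prototypes are a sketch, not from a header. */
	static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
				      unsigned long pending_updates, int active);
	static void __cpu_load_update_nohz(struct rq *this_rq,
					   unsigned long curr_jiffies,
					   unsigned long load, int active);
	static void cpu_load_update_idle(struct rq *this_rq);
	void cpu_load_update_nohz(int active);			/* public */
	void cpu_load_update_active(struct rq *this_rq);	/* public */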
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c328bd7..ecd81c4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4559,7 +4559,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
}
/**
- * __update_cpu_load - update the rq->cpu_load[] statistics
+ * __cpu_load_update - update the rq->cpu_load[] statistics
* @this_rq: The rq to update statistics for
* @this_load: The current load
* @pending_updates: The number of missed updates
@@ -4594,7 +4594,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
* see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
* term. See the @active parameter.
*/
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
unsigned long pending_updates, int active)
{
unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
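The hunks above only rename __update_cpu_load(); the smoothing it performs is
unchanged. As a minimal standalone sketch of that per-index arithmetic
(simplified types and no rq structure, so this is illustrative rather than
kernel code): each cpu_load[i] mixes the old value with the new sample at
weight 1/2^i, so higher indexes react more slowly.

	#include <stdio.h>

	/*
	 * Standalone sketch (not kernel code) of the rq->cpu_load[]
	 * smoothing step.  The result is rounded up when load rises,
	 * matching the kernel's bias toward reacting quickly to new load.
	 */
	static unsigned long load_idx_update(unsigned long old_load,
					     unsigned long new_load, int i)
	{
		if (i == 0)
			return new_load;	/* cpu_load[0] tracks the raw sample */
		if (new_load > old_load)
			new_load += (1UL << i) - 1;	/* round up when rising */
		return (old_load * ((1UL << i) - 1) + new_load) >> i;
	}

	int main(void)
	{
		unsigned long load = 0;
		int tick;

		for (tick = 0; tick < 5; tick++) {
			load = load_idx_update(load, 1024, 2);
			printf("tick %d: cpu_load[2] = %lu\n", tick, load);
		}
		return 0;
	}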
@@ -4642,7 +4642,7 @@ static unsigned long weighted_cpuload(const int cpu)
}
#ifdef CONFIG_NO_HZ_COMMON
-static void __update_cpu_load_nohz(struct rq *this_rq,
+static void __cpu_load_update_nohz(struct rq *this_rq,
unsigned long curr_jiffies,
unsigned long load,
int active)
@@ -4657,7 +4657,7 @@ static void __update_cpu_load_nohz(struct rq *this_rq,
* In the NOHZ_FULL case we were non-idle, so we should consider
* its weighted load.
*/
- __update_cpu_load(this_rq, load, pending_updates, active);
+ __cpu_load_update(this_rq, load, pending_updates, active);
}
}
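The lines elided just above this hunk compute pending_updates. A hedged
reconstruction of that context (variable and field names are taken from this
hunk; the exact surrounding code is not part of this diff): the number of
ticks missed while the CPU ran tickless is simply the jiffies delta since the
last load update.

	/* Hedged reconstruction, not part of this diff. */
	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	if (pending_updates) {
		this_rq->last_load_update_tick = curr_jiffies;
		/* Fold in the missed ticks, decaying once per missed update. */
		__cpu_load_update(this_rq, load, pending_updates, active);
	}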
@@ -4678,7 +4678,7 @@ static void __update_cpu_load_nohz(struct rq *this_rq,
* Called from nohz_idle_balance() to update the load ratings before doing the
* idle balance.
*/
-static void update_cpu_load_idle(struct rq *this_rq)
+static void cpu_load_update_idle(struct rq *this_rq)
{
/*
* bail if there's load or we're actually up-to-date.
@@ -4686,13 +4686,13 @@ static void update_cpu_load_idle(struct rq *this_rq)
if (weighted_cpuload(cpu_of(this_rq)))
return;
- __update_cpu_load_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
+ __cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
}
/*
* Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
*/
-void update_cpu_load_nohz(int active)
+void cpu_load_update_nohz(int active)
{
struct rq *this_rq = this_rq();
unsigned long curr_jiffies = READ_ONCE(jiffies);
@@ -4702,7 +4702,7 @@ void update_cpu_load_nohz(int active)
return;
raw_spin_lock(&this_rq->lock);
- __update_cpu_load_nohz(this_rq, curr_jiffies, load, active);
+ __cpu_load_update_nohz(this_rq, curr_jiffies, load, active);
raw_spin_unlock(&this_rq->lock);
}
#endif /* CONFIG_NO_HZ_COMMON */
@@ -4710,14 +4710,14 @@ void update_cpu_load_nohz(int active)
/*
* Called from scheduler_tick()
*/
-void update_cpu_load_active(struct rq *this_rq)
+void cpu_load_update_active(struct rq *this_rq)
{
unsigned long load = weighted_cpuload(cpu_of(this_rq));
/*
- * See the mess around update_cpu_load_idle() / update_cpu_load_nohz().
+ * See the mess around cpu_load_update_idle() / cpu_load_update_nohz().
*/
this_rq->last_load_update_tick = jiffies;
- __update_cpu_load(this_rq, load, 1, 1);
+ __cpu_load_update(this_rq, load, 1, 1);
}
/*
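The hardcoded (..., 1, 1) in the hunk above reflects the tick path: exactly
one update is pending and the CPU is active. A hedged sketch of the caller,
assuming the scheduler_tick() call graph of this era (it lives in
kernel/sched/core.c, not in this diff):

	/* Hedged sketch of the tick-path caller; reconstructed, not part
	 * of this diff.  scheduler_tick() runs once per jiffy on a busy
	 * CPU, so one update is pending and the CPU counts as active. */
	void scheduler_tick(void)
	{
		struct rq *rq = this_rq();

		/* rq clock and curr-task accounting elided */
		cpu_load_update_active(rq);
		/* load-balance trigger elided */
	}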
@@ -8031,7 +8031,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
if (time_after_eq(jiffies, rq->next_balance)) {
raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
- update_cpu_load_idle(rq);
+ cpu_load_update_idle(rq);
raw_spin_unlock_irq(&rq->lock);
rebalance_domains(rq, CPU_IDLE);
}