author     Peter Zijlstra <peterz@infradead.org>    2015-05-15 15:43:35 (GMT)
committer  Ingo Molnar <mingo@kernel.org>           2015-08-12 10:06:09 (GMT)
commit     c5b2803840817115e9b568d5054e5007ae36176b
tree       cf95c50a52bf6e99c9c7eaff63534b535d7ba0c2
parent     25834c73f93af7f0712c98ca4593691592e6b360
sched: Make sched_class::set_cpus_allowed() unconditional
Give every class a set_cpus_allowed() method; this enables some small
optimization in the RT and DL implementations by avoiding a double
cpumask_weight() call.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dedekind1@gmail.com
Cc: juri.lelli@arm.com
Cc: mgorman@suse.de
Cc: riel@redhat.com
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20150515154833.614517487@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
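To make the shape of the change easier to follow outside the diff, below is a
minimal, self-contained C analogue of the pattern this patch establishes; it
is a sketch only, and every name in it (cpumask_t as a bitmask stand-in,
mask_weight(), rtlike_set_cpus_allowed(), and so on) is illustrative, not
kernel code. The core dispatches unconditionally through a per-class hook; a
class with no affinity bookkeeping points the hook at the common helper,
while an RT/DL-style class computes the mask weight once, uses it for its
migratability check, and publishes mask and weight itself so the weight is
never computed twice.

#include <stdio.h>

/* Stand-in for struct cpumask: one bit per CPU. */
typedef unsigned long cpumask_t;

struct task;

struct sched_class {
	/* After this patch, every class must provide this hook. */
	void (*set_cpus_allowed)(struct task *p, cpumask_t new_mask);
};

struct task {
	cpumask_t cpus_allowed;
	int nr_cpus_allowed;
	const struct sched_class *sched_class;
};

/* Analogue of cpumask_weight(): number of set bits. */
static int mask_weight(cpumask_t mask)
{
	return __builtin_popcountl(mask);
}

/* Analogue of set_cpus_allowed_common(): just publish mask and weight. */
static void common_set_cpus_allowed(struct task *p, cpumask_t new_mask)
{
	p->cpus_allowed = new_mask;
	p->nr_cpus_allowed = mask_weight(new_mask);
}

/*
 * RT/DL-style method: compute the weight once, use it for the
 * migratability bookkeeping, then publish mask and weight directly
 * instead of calling the common helper (which would recompute it).
 */
static void rtlike_set_cpus_allowed(struct task *p, cpumask_t new_mask)
{
	int weight = mask_weight(new_mask);	/* single weight computation */

	if ((p->nr_cpus_allowed > 1) != (weight > 1))
		printf("migratability flipped: %d -> %d allowed CPUs\n",
		       p->nr_cpus_allowed, weight);

	p->cpus_allowed = new_mask;
	p->nr_cpus_allowed = weight;
}

/* Analogue of the new do_set_cpus_allowed(): unconditional dispatch. */
static void do_set_cpus_allowed(struct task *p, cpumask_t new_mask)
{
	p->sched_class->set_cpus_allowed(p, new_mask);
}

int main(void)
{
	const struct sched_class fair_like = { common_set_cpus_allowed };
	const struct sched_class rt_like   = { rtlike_set_cpus_allowed };
	struct task p = { 0xf, 4, &rt_like };	/* CPUs 0-3 allowed */

	do_set_cpus_allowed(&p, 0x1);		/* pin to CPU 0 */
	printf("nr_cpus_allowed = %d\n", p.nr_cpus_allowed);

	p.sched_class = &fair_like;
	do_set_cpus_allowed(&p, 0x6);		/* CPUs 1-2 */
	printf("nr_cpus_allowed = %d\n", p.nr_cpus_allowed);
	return 0;
}

Compiled with a GCC-compatible compiler, the sketch prints nr_cpus_allowed = 1
and then 2, exercising both the class-specific and the common dispatch paths.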
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c      | 17
 kernel/sched/deadline.c  | 20
 kernel/sched/fair.c      |  1
 kernel/sched/idle_task.c |  1
 kernel/sched/rt.c        | 12
 kernel/sched/sched.h     |  2
 kernel/sched/stop_task.c |  1
 7 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2e3b983..740f90b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1151,17 +1151,22 @@ static int migration_cpu_stop(void *data)
return 0;
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+/*
+ * sched_class::set_cpus_allowed must do the below, but is not required to
+ * actually call this function.
+ */
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
- lockdep_assert_held(&p->pi_lock);
-
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, new_mask);
-
cpumask_copy(&p->cpus_allowed, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ lockdep_assert_held(&p->pi_lock);
+ p->sched_class->set_cpus_allowed(p, new_mask);
+}
+
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
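The core.c hunk above is the pivot of the patch: the NULL test on
p->sched_class->set_cpus_allowed is gone, so the hook is now part of the
sched_class contract and the indirect call in do_set_cpus_allowed() is
unconditional, guarded only by the lockdep assertion on p->pi_lock. The
mask-and-weight update itself moves into set_cpus_allowed_common(); the fair,
idle and stop classes simply plug that helper in (see their one-line hunks
below), while the RT and DL methods perform the update at their new 'done:'
labels, reusing the weight they have already computed instead of letting the
common helper call cpumask_weight() again.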
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 20772ee..dc357fa 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1696,13 +1696,6 @@ static void set_cpus_allowed_dl(struct task_struct *p,
raw_spin_unlock(&src_dl_b->lock);
}
- /*
- * Update only if the task is actually running (i.e.,
- * it is on the rq AND it is not throttled).
- */
- if (!on_dl_rq(&p->dl))
- return;
-
weight = cpumask_weight(new_mask);
/*
@@ -1710,7 +1703,14 @@ static void set_cpus_allowed_dl(struct task_struct *p,
* can migrate or not.
*/
if ((p->nr_cpus_allowed > 1) == (weight > 1))
- return;
+ goto done;
+
+ /*
+ * Update only if the task is actually running (i.e.,
+ * it is on the rq AND it is not throttled).
+ */
+ if (!on_dl_rq(&p->dl))
+ goto done;
/*
* The process used to be able to migrate OR it can now migrate
@@ -1727,6 +1727,10 @@ static void set_cpus_allowed_dl(struct task_struct *p,
}
update_dl_migration(&rq->dl);
+
+done:
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->nr_cpus_allowed = weight;
}
/* Assumes rq->lock is held */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0950fd..6e2e348 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8252,6 +8252,7 @@ const struct sched_class fair_sched_class = {
.task_waking = task_waking_fair,
.task_dead = task_dead_fair,
+ .set_cpus_allowed = set_cpus_allowed_common,
#endif
.set_curr_task = set_curr_task_fair,
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index c65dac8..c4ae0f1 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -96,6 +96,7 @@ const struct sched_class idle_sched_class = {
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_idle,
+ .set_cpus_allowed = set_cpus_allowed_common,
#endif
.set_curr_task = set_curr_task_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 00816ee..63692ef 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2084,9 +2084,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
BUG_ON(!rt_task(p));
- if (!task_on_rq_queued(p))
- return;
-
weight = cpumask_weight(new_mask);
/*
@@ -2094,7 +2091,10 @@ static void set_cpus_allowed_rt(struct task_struct *p,
* can migrate or not.
*/
if ((p->nr_cpus_allowed > 1) == (weight > 1))
- return;
+ goto done;
+
+ if (!task_on_rq_queued(p))
+ goto done;
rq = task_rq(p);
@@ -2113,6 +2113,10 @@ static void set_cpus_allowed_rt(struct task_struct *p,
}
update_rt_migration(&rq->rt);
+
+done:
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->nr_cpus_allowed = weight;
}
/* Assumes rq->lock is held */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 22ccc55..68cda11 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1255,6 +1255,8 @@ extern void trigger_load_balance(struct rq *rq);
extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+
#else
static inline void idle_enter_fair(struct rq *rq) { }
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 79ffec4..cbc67da 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -123,6 +123,7 @@ const struct sched_class stop_sched_class = {
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_stop,
+ .set_cpus_allowed = set_cpus_allowed_common,
#endif
.set_curr_task = set_curr_task_stop,