author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-09-27 12:40:25 (GMT)
committer  Scott Wood <scottwood@freescale.com>       2015-02-13 22:20:38 (GMT)
commit     5548232be0d297a23436de5de936855316bb02bb (patch)
tree       9039dafb6c069342d79571cd68c43682f3b443df /kernel/sched
parent     5b7a2b9962cbbcf841cd11533c2e5010087bc02c (diff)
sched: Have migrate_disable ignore bounded threads
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <williams@redhat.com>
Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  |  23
1 file changed, 9 insertions(+), 14 deletions(-)
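The change makes migrate_disable()/migrate_enable() take the early-exit path not only in atomic context but also for tasks flagged PF_NO_SETAFFINITY, i.e. kernel threads bound to a CPU whose affinity must not be changed anyway, so disabling migration for them is pointless. For orientation, a minimal user-space sketch of that check follows; the struct, helper names and flag value are illustrative stand-ins rather than the kernel's definitions, and only the flag test mirrors the hunks below.

/*
 * Editor's illustration, not part of the patch: sketch of the early-exit
 * check that both hunks add to migrate_disable()/migrate_enable().
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_NO_SETAFFINITY 0x04000000   /* stand-in bit for the sketch */

struct task {                           /* simplified stand-in for task_struct */
	unsigned int flags;
	int migrate_disable;            /* nesting depth of migrate_disable() */
	int migrate_disable_atomic;     /* debug-only counter */
};

/* Mirrors the condition added in the patch: atomic context or a bound thread. */
static bool migrate_toggle_is_noop(const struct task *p, bool in_atomic)
{
	return in_atomic || (p->flags & PF_NO_SETAFFINITY);
}

static void migrate_disable_sketch(struct task *p, bool in_atomic)
{
	if (migrate_toggle_is_noop(p, in_atomic)) {
		p->migrate_disable_atomic++;    /* debug accounting only */
		return;
	}
	p->migrate_disable++;                   /* real nesting count */
}

int main(void)
{
	struct task bound_kthread = { .flags = PF_NO_SETAFFINITY };
	struct task normal_task = { 0 };

	migrate_disable_sketch(&bound_kthread, false);
	migrate_disable_sketch(&normal_task, false);

	printf("bound kthread: migrate_disable=%d atomic=%d\n",
	       bound_kthread.migrate_disable, bound_kthread.migrate_disable_atomic);
	printf("normal task:   migrate_disable=%d atomic=%d\n",
	       normal_task.migrate_disable, normal_task.migrate_disable_atomic);
	return 0;
}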
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2752836..8123f8a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2404,7 +2404,7 @@ void migrate_disable(void)
 {
 	struct task_struct *p = current;
 
-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic++;
 #endif
@@ -2434,7 +2434,7 @@ void migrate_enable(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic--;
 #endif
@@ -2454,26 +2454,21 @@ void migrate_enable(void)
 	preempt_disable();
 	if (unlikely(migrate_disabled_updated(p))) {
 		/*
-		 * See comment in update_migrate_disable() about locking.
+		 * Undo whatever update_migrate_disable() did, also see there
+		 * about locking.
 		 */
 		rq = this_rq();
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		mask = tsk_cpus_allowed(p);
+
 		/*
 		 * Clearing migrate_disable causes tsk_cpus_allowed to
 		 * show the tasks original cpu affinity.
 		 */
 		p->migrate_disable = 0;
-
-		WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-
-		if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
-			/* Get the mask now that migration is enabled */
-			mask = tsk_cpus_allowed(p);
-			if (p->sched_class->set_cpus_allowed)
-				p->sched_class->set_cpus_allowed(p, mask);
-			p->nr_cpus_allowed = cpumask_weight(mask);
-		}
+		mask = tsk_cpus_allowed(p);
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	} else
 		p->migrate_disable = 0;