author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-08-11 13:03:35 (GMT)
committer	Scott Wood <scottwood@freescale.com>	2015-02-13 22:20:37 (GMT)
commit		9587cb84db747d8cbe73bf8861e843778a5a462b (patch)
tree		961e9bd2e4523bd9fe2947c2ce7e16372e539fdb /kernel/sched
parent		4e2a82b515395625e60cf108455ea4f473e3df9c (diff)
download	linux-fsl-qoriq-9587cb84db747d8cbe73bf8861e843778a5a462b.tar.xz
sched: Optimize migrate_disable
Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few
atomic ops. See comment on why it should be safe.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
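The change replaces the double-lock helper with a plain acquisition of the local runqueue lock. A condensed sketch of the before/after locking pattern (paraphrasing the hunks below, not literal kernel source):

	/* Before: task_rq_lock() acquires p->pi_lock and then rq->lock. */
	rq = task_rq_lock(p, &flags);
	/* ... update p->migrate_disable and the allowed-CPUs mask ... */
	task_rq_unlock(rq, p, &flags);

	/*
	 * After: p == current, so it cannot be mid-wakeup and p->pi_lock is
	 * not needed; taking only the local rq->lock saves the extra atomic
	 * ops while still serializing against set_cpus_allowed_ptr().
	 */
	rq = this_rq();
	raw_spin_lock_irqsave(&rq->lock, flags);
	/* ... update p->migrate_disable and the allowed-CPUs mask ... */
	raw_spin_unlock_irqrestore(&rq->lock, flags);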
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 489aa4f..40523f9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4435,7 +4435,19 @@ void migrate_disable(void)
		preempt_enable();
		return;
	}
-	rq = task_rq_lock(p, &flags);
+
+	/*
+	 * Since this is always current we can get away with only locking
+	 * rq->lock, the ->cpus_allowed value can normally only be changed
+	 * while holding both p->pi_lock and rq->lock, but seeing that this
+	 * is current, we cannot actually be waking up, so all code that
+	 * relies on serialization against p->pi_lock is out of scope.
+	 *
+	 * Taking rq->lock serializes us against things like
+	 * set_cpus_allowed_ptr() that can still happen concurrently.
+	 */
+	rq = this_rq();
+	raw_spin_lock_irqsave(&rq->lock, flags);
	p->migrate_disable = 1;
	mask = tsk_cpus_allowed(p);
@@ -4446,7 +4458,7 @@ void migrate_disable(void)
			p->sched_class->set_cpus_allowed(p, mask);
		p->nr_cpus_allowed = cpumask_weight(mask);
	}
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
	preempt_enable();
}
EXPORT_SYMBOL(migrate_disable);
@@ -4474,7 +4486,11 @@ void migrate_enable(void)
		return;
	}
-	rq = task_rq_lock(p, &flags);
+	/*
+	 * See comment in migrate_disable().
+	 */
+	rq = this_rq();
+	raw_spin_lock_irqsave(&rq->lock, flags);
	p->migrate_disable = 0;
	mask = tsk_cpus_allowed(p);
@@ -4486,7 +4502,7 @@ void migrate_enable(void)
		p->nr_cpus_allowed = cpumask_weight(mask);
	}
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
	unpin_current_cpu();
	preempt_enable();
}
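For context, a minimal, hypothetical caller of the API being optimized here (illustrative only, not part of this commit): within a migrate_disable()/migrate_enable() pair the task stays pinned to its current CPU, so the CPU id remains stable even if the task is preempted or takes a sleeping lock (as spinlocks are on PREEMPT_RT).

static void show_pinned_cpu(void)
{
	migrate_disable();
	/*
	 * Pinned to this CPU until migrate_enable(); smp_processor_id()
	 * stays valid even across preemption or sleeping locks.
	 */
	pr_info("running pinned on CPU %d\n", smp_processor_id());
	migrate_enable();
}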