author	Thomas Gleixner <tglx@linutronix.de>	2011-06-16 11:26:08 (GMT)
committer	Scott Wood <scottwood@freescale.com>	2015-02-13 22:20:35 (GMT)
commit	2e61bae2d666e1eea7d367b5b0b7b932ee573c29 (patch)
tree	af875940ab205cec4179f94c9bd3728d58bc57e1 /kernel/sched
parent	fda46064abe2b94faafa5db1d6d66d97f006ae85 (diff)
download	linux-fsl-qoriq-2e61bae2d666e1eea7d367b5b0b7b932ee573c29.tar.xz
sched-migrate-disable.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	88
1 file changed, 83 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2028291..489aa4f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4345,11 +4345,12 @@ void init_idle(struct task_struct *idle, int cpu)
 #ifdef CONFIG_SMP
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
+	if (!p->migrate_disable) {
+		if (p->sched_class && p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, new_mask);
+		p->nr_cpus_allowed = cpumask_weight(new_mask);
+	}
 	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -4395,7 +4396,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -4414,6 +4415,83 @@ out:
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	preempt_disable();
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		preempt_enable();
+		return;
+	}
+
+	pin_current_cpu();
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 1;
+		preempt_enable();
+		return;
+	}
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 1;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
+	}
+	task_rq_unlock(rq, p, &flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+
+	preempt_disable();
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		preempt_enable();
+		return;
+	}
+
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 0;
+		unpin_current_cpu();
+		preempt_enable();
+		return;
+	}
+
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 0;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
+	}
+
+	task_rq_unlock(rq, p, &flags);
+	unpin_current_cpu();
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
  * this because either it can't run here any more (set_cpus_allowed()
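For context, a minimal usage sketch of the new pair. It is not part of the commit: example_pinned_section() is a hypothetical caller, and the prototypes for migrate_disable()/migrate_enable() come from the companion patches in the RT series. The point it illustrates is visible in the diff above: a migrate-disabled section stays fully preemptible (it may even sleep), but the task is guaranteed to resume on the same CPU, and the calls nest via p->migrate_disable so only the outermost pair does the real pin/unpin work.

#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/kernel.h>

/* Illustrative sketch only -- not part of this patch. */
static void example_pinned_section(void)
{
	int cpu;

	migrate_disable();	/* pin: no migration; preemption stays enabled */

	cpu = raw_smp_processor_id();
	msleep(1);		/* sleeping is fine; the section is not atomic */
	/* We may have been preempted above, but never migrated: */
	WARN_ON_ONCE(cpu != raw_smp_processor_id());

	migrate_enable();	/* outermost call unpins the task */
}

Note that migrate_disable() by itself provides no exclusion against other tasks running on the same CPU; per-CPU data touched in such a section still needs a lock. The same division of labour explains the do_set_cpus_allowed() hunk above: while p->migrate_disable is set, a new mask is only recorded in p->cpus_allowed, and the sched_class callback and nr_cpus_allowed update are skipped, so a wider affinity cannot take effect while the task is pinned.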