author     Thomas Gleixner <tglx@linutronix.de>    2011-06-16 11:26:08 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2014-05-14 18:38:16 (GMT)
commit     0a30be4525a026dfe566c5ae15e2073a2855abe8 (patch)
tree       5ee360ec1d924d415da893fe68611649ee0343e9
parent     ed286d4298597128252df0bc6b97bdc121f9084f (diff)
download   linux-fsl-qoriq-0a30be4525a026dfe566c5ae15e2073a2855abe8.tar.xz
sched-migrate-disable.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
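
[Editor's note] What the patch does: migrate_disable()/migrate_enable() let the
current task forbid migration to another CPU while staying fully preemptible.
The state nests via the new per-task migrate_disable counter, tsk_cpus_allowed()
becomes an inline accessor that collapses to cpumask_of(task_cpu(p)) while the
counter is non-zero, and on !CONFIG_SMP both calls reduce to barrier(), since a
uniprocessor task cannot migrate anyway. A minimal usage sketch (not part of
the patch; demo_counter and demo_update are made-up names):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update(void)
{
	migrate_disable();
	/*
	 * Still preemptible here, but the task cannot be moved to
	 * another CPU, so the CPU id read below stays valid for the
	 * whole section.
	 */
	per_cpu(demo_counter, smp_processor_id())++;
	migrate_enable();
}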
-rw-r--r--  include/linux/preempt.h  |  8
-rw-r--r--  include/linux/sched.h    | 13
-rw-r--r--  kernel/sched/core.c      | 88
-rw-r--r--  lib/smp_processor_id.c   |  6
4 files changed, 104 insertions, 11 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f237ebd..fbf934b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -130,6 +130,14 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
+#ifdef CONFIG_SMP
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#else
+# define migrate_disable() barrier()
+# define migrate_enable() barrier()
+#endif
+
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt() preempt_disable()
# define preempt_enable_rt() preempt_enable()
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bf1cf6d..9861c9b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1063,6 +1063,7 @@ struct task_struct {
#endif
	unsigned int policy;
+	int migrate_disable;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;
@@ -1436,9 +1437,6 @@ struct task_struct {
#endif
};
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
@@ -2734,6 +2732,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+	if (p->migrate_disable)
+		return cpumask_of(task_cpu(p));
+
+	return &p->cpus_allowed;
+}
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
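
[Editor's note] Turning tsk_cpus_allowed() from a macro into an inline function
means every existing caller automatically sees the collapsed single-CPU mask
while a task has migration disabled. For example, an affinity check written
against the accessor (can_run_on() is an illustrative helper, not part of the
patch; assumes <linux/sched.h>):

static bool can_run_on(struct task_struct *p, int cpu)
{
	/* While p is pinned, this tests against cpumask_of(task_cpu(p)). */
	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
}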
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3d063e2..7ef7cd3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4345,11 +4345,12 @@ void init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
+	if (!p->migrate_disable) {
+		if (p->sched_class && p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, new_mask);
+		p->nr_cpus_allowed = cpumask_weight(new_mask);
+	}
	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = cpumask_weight(new_mask);
}
/*
@@ -4395,7 +4396,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
	do_set_cpus_allowed(p, new_mask);
	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
		goto out;
	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -4414,6 +4415,83 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	preempt_disable();
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		preempt_enable();
+		return;
+	}
+
+	pin_current_cpu();
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 1;
+		preempt_enable();
+		return;
+	}
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 1;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
+	}
+	task_rq_unlock(rq, p, &flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+
+	preempt_disable();
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		preempt_enable();
+		return;
+	}
+
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 0;
+		unpin_current_cpu();
+		preempt_enable();
+		return;
+	}
+
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 0;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
+	}
+
+	task_rq_unlock(rq, p, &flags);
+	unpin_current_cpu();
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
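
[Editor's note] Both entry points nest: only the outermost pair takes the
runqueue lock and rewrites the effective affinity, while inner pairs merely
bump or drop the per-task counter. The intended calling pattern, sketched
(illustrative only):

	migrate_disable();	/* outermost: pins the CPU, shrinks the mask */
	migrate_disable();	/* nested: only increments p->migrate_disable */
	/* preemptible, migration-free critical section */
	migrate_enable();	/* nested: only decrements the counter */
	migrate_enable();	/* outermost: restores cpus_allowed and unpins */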
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4c0d0e5..0a846e7 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
	if (!printk_ratelimit())
		goto out_enable;
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-		"code: %s/%d\n",
-		preempt_count() - 1, current->comm, current->pid);
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
+		"code: %s/%d\n", preempt_count() - 1,
+		current->migrate_disable, current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();
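
[Editor's note] With the extra field the warning reports the migrate_disable
depth alongside the preempt count, which tells a pinned-but-preemptible task
apart from a genuinely migratable one. Under the new format a report would
look like this (task name and pid invented for illustration):

	BUG: using smp_processor_id() in preemptible [00000000 00000001] code: demo/1234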