author		Peter Zijlstra <a.p.zijlstra@chello.nl>		2011-08-11 13:14:58 (GMT)
committer	Scott Wood <scottwood@freescale.com>		2014-04-10 00:19:17 (GMT)
commit		2481a38554d43a00b23ecce4dd3175653b845e41 (patch)
tree		5aa7c426ba030f745d2a99c409313a17f964bd7c
parent		78211b80f38536b8e0e2dea5dddb665969fe4c0e (diff)
download	linux-fsl-qoriq-2481a38554d43a00b23ecce4dd3175653b845e41.tar.xz
sched: Generic migrate_disable
Make migrate_disable() be a preempt_disable() for !rt kernels. This allows generic code to use it but still enforces that these code sections stay relatively small.

A preemptible migrate_disable() accessible for general use would allow people to grow arbitrary per-cpu crap instead of cleaning these things up.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
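As an illustration of the pattern this enables (not part of the patch; my_lock, my_count and my_count_inc() are hypothetical names), generic code can now bracket per-CPU work with migrate_disable()/migrate_enable(): on !rt kernels the pair collapses to preempt_disable()/preempt_enable(), while on PREEMPT_RT_FULL the task merely stays pinned to its CPU and a per-CPU lock still provides exclusion against other tasks preempting it on that CPU.

/*
 * Illustrative sketch only -- my_lock, my_count and my_count_inc() are
 * hypothetical and not part of this patch.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(spinlock_t, my_lock);	/* spin_lock_init() at init time */
static DEFINE_PER_CPU(unsigned long, my_count);

static void my_count_inc(void)
{
	spinlock_t *lock;

	migrate_disable();		/* stay on this CPU; preempt_disable() on !rt */
	lock = this_cpu_ptr(&my_lock);
	spin_lock(lock);		/* sleeping lock on RT, so preemption stays enabled */
	__this_cpu_inc(my_count);
	spin_unlock(lock);
	migrate_enable();
}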
-rw-r--r--	include/linux/preempt.h		21
-rw-r--r--	include/linux/sched.h		13
-rw-r--r--	include/linux/smp.h		9
-rw-r--r--	kernel/sched/core.c		6
-rw-r--r--	kernel/trace/trace.c		2
-rw-r--r--	lib/smp_processor_id.c		2
6 files changed, 30 insertions, 23 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f906def..116ac32 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -130,28 +130,25 @@ do { \
#endif /* CONFIG_PREEMPT_COUNT */
-#ifdef CONFIG_SMP
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-#else
-# define migrate_disable() barrier()
-# define migrate_enable() barrier()
-#endif
-
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt() preempt_disable()
# define preempt_enable_rt() preempt_enable()
# define preempt_disable_nort() barrier()
# define preempt_enable_nort() barrier()
-# define migrate_disable_rt() migrate_disable()
-# define migrate_enable_rt() migrate_enable()
+# ifdef CONFIG_SMP
+ extern void migrate_disable(void);
+ extern void migrate_enable(void);
+# else /* CONFIG_SMP */
+# define migrate_disable() barrier()
+# define migrate_enable() barrier()
+# endif /* CONFIG_SMP */
#else
# define preempt_disable_rt() barrier()
# define preempt_enable_rt() barrier()
# define preempt_disable_nort() preempt_disable()
# define preempt_enable_nort() preempt_enable()
-# define migrate_disable_rt() barrier()
-# define migrate_enable_rt() barrier()
+# define migrate_disable() preempt_disable()
+# define migrate_enable() preempt_enable()
#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9861c9b..9ef7bee 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1063,7 +1063,9 @@ struct task_struct {
#endif
unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
int migrate_disable;
+#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -2732,11 +2734,22 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+static inline int __migrate_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ return p->migrate_disable;
+#else
+ return 0;
+#endif
+}
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
if (p->migrate_disable)
return cpumask_of(task_cpu(p));
+#endif
return &p->cpus_allowed;
}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index f30c7b1..e05b694 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,13 +188,8 @@ static inline void __smp_call_function_single(int cpuid,
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define get_cpu_light() get_cpu()
-# define put_cpu_light() put_cpu()
-#else
-# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
-# define put_cpu_light() migrate_enable()
-#endif
+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light() migrate_enable()
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
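For illustration only (the per-CPU list and my_queue_add() below are hypothetical, not part of the patch), get_cpu_light()/put_cpu_light() now always expand to the migrate_disable() pair, which keeps the CPU number stable without disabling preemption on rt:

/* Hypothetical example of the get_cpu_light() pattern. */
#include <linux/llist.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct llist_head, my_queue);

static void my_queue_add(struct llist_node *node)
{
	int cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */

	/* llist_add() is lock-free, so being preempted on this CPU is harmless */
	llist_add(node, &per_cpu(my_queue, cpu));
	put_cpu_light();		/* migrate_enable() */
}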
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8cdf838..17dc85f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4345,7 +4345,7 @@ void init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
- if (!p->migrate_disable) {
+ if (!__migrate_disabled(p)) {
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
@@ -4396,7 +4396,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -4415,6 +4415,7 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+#ifdef CONFIG_PREEMPT_RT_FULL
void migrate_disable(void)
{
struct task_struct *p = current;
@@ -4507,6 +4508,7 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
+#endif /* CONFIG_PREEMPT_RT_FULL */
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4caf463..402fcc6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1520,7 +1520,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
- entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 0a846e7..dbb1570 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -41,7 +41,7 @@ notrace unsigned int debug_smp_processor_id(void)
printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
"code: %s/%d\n", preempt_count() - 1,
- current->migrate_disable, current->comm, current->pid);
+ __migrate_disabled(current), current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();