author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-31 20:35:31 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-31 20:35:31 (GMT)
commit     f22e08a79f3765fecf060b225a46931c94fb0a92 (patch)
tree       2f02777e8893d03289ec0a7f5f414f3ff01c8c37 /kernel
parent     f187e9fd68577cdd5f914659b6f7f11124e40485 (diff)
parent     e3831edd59edf57ca11fc289f08961b20baf5146 (diff)
download   linux-fsl-qoriq-f22e08a79f3765fecf060b225a46931c94fb0a92.tar.xz
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix incorrect usage of for_each_cpu_mask() in select_fallback_rq()
  sched: Fix __schedule_bug() output when called from an interrupt
  sched/arch: Introduce the finish_arch_post_lock_switch() scheduler callback
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched/core.c   | 13
 -rw-r--r--  kernel/sched/sched.h  |  3
 2 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3ed0ec..4603b9d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1270,7 +1270,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
int dest_cpu;
/* Look for allowed, online CPU in same node. */
- for_each_cpu_mask(dest_cpu, *nodemask) {
+ for_each_cpu(dest_cpu, nodemask) {
if (!cpu_online(dest_cpu))
continue;
if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
for (;;) {
/* Any allowed, online CPU? */
- for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+ for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
if (!cpu_online(dest_cpu))
continue;
if (!cpu_active(dest_cpu))
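The two hunks above swap the obsolete for_each_cpu_mask(), which took the cpumask itself, for for_each_cpu(), which takes a pointer to the mask; that is why nodemask and tsk_cpus_allowed(p) are no longer dereferenced at the call sites. The fragment below is a minimal sketch of the resulting iteration pattern, assuming kernel context; pick_online_cpu_on_node() is a hypothetical helper used only for illustration, not part of this patch.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: walk a node's cpumask through a pointer, the way
 * the fixed select_fallback_rq() loops do, and return the first usable CPU. */
static int pick_online_cpu_on_node(int nid)
{
	const struct cpumask *nodemask = cpumask_of_node(nid);
	int cpu;

	/* for_each_cpu() expects a const struct cpumask *, not a cpumask_t */
	for_each_cpu(cpu, nodemask) {
		if (!cpu_online(cpu))
			continue;
		if (!cpu_active(cpu))
			continue;
		return cpu;	/* first online, active CPU on this node */
	}

	return -1;	/* no suitable CPU on this node */
}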
@@ -1964,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
+ finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
if (mm)
@@ -3101,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
- struct pt_regs *regs = get_irq_regs();
-
if (oops_in_progress)
return;
@@ -3113,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
-
- if (regs)
- show_regs(regs);
- else
- dump_stack();
+ dump_stack();
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 42b1f30..fb3acba 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -681,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch() do { } while (0)
+#endif
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
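The #ifndef block added to kernel/sched/sched.h gives every architecture a no-op finish_arch_post_lock_switch() by default; the hook is invoked from finish_task_switch() right after finish_lock_switch(), i.e. once the runqueue lock has been released. An architecture opts in by defining the macro to its own function in a header the scheduler code pulls in. What follows is a minimal sketch of that override pattern in a hypothetical arch header; the body is illustrative and not taken from this merge.

/* In some arch-provided header seen by kernel/sched/ (illustrative only): */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/* Architecture-specific work that has to run after the context
	 * switch but outside the runqueue lock, e.g. completing a deferred
	 * mm switch once interrupts are enabled again. */
}

Defining the macro to its own name is what lets the #ifndef guard in sched.h detect the override while the symbol itself remains an ordinary static inline function.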