author    Thomas Gleixner <tglx@linutronix.de>  2011-09-15 13:32:06 (GMT)
committer Ingo Molnar <mingo@elte.hu>           2011-10-04 10:44:07 (GMT)
commit    908a3283728d92df36e0c7cd63304fd35e93a8a9 (patch)
tree      2511eb8eee1ebc534f34abd640e0fddb4d4fe5a3 /kernel/sched.c
parent    f0f1d32f931b705c4ee5dd374074d34edf3eae14 (diff)
download  linux-fsl-qoriq-908a3283728d92df36e0c7cd63304fd35e93a8a9.tar.xz
sched: Fix idle_cpu()
On -rt we observed hackbench waking all 400 tasks to a single cpu. This is
because of select_idle_sibling()'s interaction with the new IPI-based wakeup
scheme.

The existing idle_cpu() test only checks whether the current task on that cpu
is the idle task; it does not take already queued tasks into account, nor does
it take queued-to-be-woken tasks into account.

If the remote wakeup IPIs come hard enough, there won't be time to schedule
away from the idle task, and idle_cpu() would thus keep reporting the cpu as
idle, regardless of the fact that there were already several hundred tasks
runnable.

We couldn't reproduce this on mainline, but there's no reason it couldn't
happen there.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-3o30p18b2paswpc9ohy2gltp@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
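For context on the "queued-to-be-woken" case: with the IPI-based wakeup scheme,
a remote wakeup parks the task on the target runqueue's wake_list and sends a
reschedule IPI; the task only becomes runnable once scheduler_ipi() drains that
list on the target cpu. The fragment below is a minimal sketch of that enqueue
step, approximating ttwu_queue_remote() as it looks in kernel/sched.c around
this version (abridged, not a verbatim copy of the commit's context):

	/* Sketch: queue a task for remote wakeup and kick the target cpu. */
	static void ttwu_queue_remote(struct task_struct *p, int cpu)
	{
		struct rq *rq = cpu_rq(cpu);

		/* Task is now pending on wake_list, not yet counted in nr_running. */
		if (llist_add(&p->wake_entry, &rq->wake_list))
			smp_send_reschedule(cpu);	/* scheduler_ipi() drains wake_list */
	}

Until that IPI is processed, such tasks are visible neither through rq->curr
nor rq->nr_running, which is why the new idle_cpu() below also has to look at
rq->wake_list.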
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1874c74..4cdc91c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5138,7 +5138,20 @@ EXPORT_SYMBOL(task_nice);
  */
 int idle_cpu(int cpu)
 {
-	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->curr != rq->idle)
+		return 0;
+
+	if (rq->nr_running)
+		return 0;
+
+#ifdef CONFIG_SMP
+	if (!llist_empty(&rq->wake_list))
+		return 0;
+#endif
+
+	return 1;
 }
 
 /**
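Usage note: idle_cpu() is what wakeup-time placement code such as
select_idle_sibling() consults when hunting for an idle target. The loop below
is purely illustrative (pick_idle_cpu() is a hypothetical helper, not kernel
code) and only shows the effect of the stricter test: once a cpu has queued or
pending-wakeup tasks it stops reporting as idle, so further wakeups fan out to
other cpus instead of piling onto one.

	/* Hypothetical helper, for illustration only. */
	static int pick_idle_cpu(const struct cpumask *candidates, int fallback)
	{
		int cpu;

		for_each_cpu(cpu, candidates) {
			if (idle_cpu(cpu))
				return cpu;	/* no current task, nothing queued, no pending wakeups */
		}
		return fallback;
	}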