author	Peter Zijlstra <peterz@infradead.org>	2016-03-10 11:54:14 (GMT)
committer	Thomas Gleixner <tglx@linutronix.de>	2016-05-06 12:58:24 (GMT)
commit	b2454caa8977ade27292a71f2def5e403e24b4d5
tree	aead0505616aa4402a99f4a85bf2dc6d001e2a4a	/kernel/sched
parent	40190a78f85fec29f0fdd21f6b4415712085711e
sched/hotplug: Move sync_rcu to be with set_cpu_active(false)
The sync_rcu stuff is specifically for clearing bits in the active mask, such
that everybody will observe the bit cleared and will not consider the cleared
CPU for load-balancing etc.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160310120025.169219710@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
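For illustration only, not part of this patch: a minimal reader-side sketch of
the kind of preempt-disabled section the grace periods below wait for. The
function name pick_active_cpu_sketch and its candidates argument are
hypothetical; cpumask_any_and(), cpu_active_mask, nr_cpu_ids and
preempt_disable()/preempt_enable() are the kernel primitives such a path would
use. Once set_cpu_active(cpu, false) is visible and both grace periods have
elapsed, no section like this can still select the outgoing CPU.

/* Hypothetical reader-side sketch; kernel context assumed. */
static int pick_active_cpu_sketch(const struct cpumask *candidates)
{
	int cpu;

	/* Preempt-disabled region acts as a sched-RCU read-side section. */
	preempt_disable();
	cpu = cpumask_any_and(candidates, cpu_active_mask);
	preempt_enable();

	/* cpumask_any_and() returns >= nr_cpu_ids when no CPU matches. */
	return cpu < nr_cpu_ids ? cpu : -1;
}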
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 73bcd93..0a31078 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7112,6 +7112,20 @@ int sched_cpu_deactivate(unsigned int cpu)
 	int ret;
 
 	set_cpu_active(cpu, false);
+	/*
+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+	 * users of this state to go away such that all new such users will
+	 * observe it.
+	 *
+	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+	 * not imply sync_sched(), so wait for both.
+	 *
+	 * Do sync before park smpboot threads to take care the rcu boost case.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
 
 	if (!sched_smp_initialized)
 		return 0;