From 1e05219e0ca80d8568dc39e3365646540ce75b65 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:30:27 -0500
Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT

Instead of playing with non-preemption, introduce explicit
startup serialization. This is more robust and cleaner as
well.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f194e9..61779f8 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -135,6 +135,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_MUTEX(stopper_lock);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
 static void queue_stop_cpus_work(const struct cpumask *cpumask,
@@ -153,15 +154,14 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
 	}
 
 	/*
-	 * Disable preemption while queueing to avoid getting
-	 * preempted by a stopper which might wait for other stoppers
-	 * to enter @fn which can lead to deadlock.
+	 * Make sure that all work is queued on all cpus before
+	 * any of the cpus can execute it.
 	 */
-	preempt_disable();
+	mutex_lock(&stopper_lock);
 	for_each_cpu(cpu, cpumask)
 		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
 				    &per_cpu(stop_cpus_work, cpu));
-	preempt_enable();
+	mutex_unlock(&stopper_lock);
 }
 
 static int __stop_cpus(const struct cpumask *cpumask,
@@ -275,6 +275,16 @@ repeat:
 
 		__set_current_state(TASK_RUNNING);
 
+		/*
+		 * Wait until the stopper has finished scheduling on
+		 * all cpus.
+		 */
+		mutex_lock(&stopper_lock);
+		/*
+		 * Let the other cpu threads continue as well.
+		 */
+		mutex_unlock(&stopper_lock);
+
 		/* cpu stop callbacks are not allowed to sleep */
 		preempt_disable();
 
--
cgit v0.10.2
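
[Editor's illustration, not part of the patch.] The serialization pattern above can be
modeled in user space with a plain pthread mutex standing in for stopper_lock: the
queueing side holds the mutex while it hands work to every worker, and each worker takes
and immediately drops the mutex before running, so no worker can start until all of them
have been queued. The names stopper_lock_demo, worker() and NR_WORKERS below are made up
for this sketch; it is a minimal analogy, not kernel code.

/*
 * Stand-alone sketch of the stopper_lock startup serialization,
 * modeled with pthreads.  The mutex plays the role of stopper_lock
 * and worker() plays the role of a per-cpu stopper thread.
 * Build with: gcc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

static pthread_mutex_t stopper_lock_demo = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	int id = *(int *)arg;

	/*
	 * Wait until the queueing side has handed out work to all
	 * workers: the lock is only dropped once queueing is done.
	 */
	pthread_mutex_lock(&stopper_lock_demo);
	/* Let the other workers continue as well. */
	pthread_mutex_unlock(&stopper_lock_demo);

	printf("worker %d runs its work\n", id);
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_WORKERS];
	int ids[NR_WORKERS];
	int i;

	/* Queueing phase: hold the lock while all work is handed out. */
	pthread_mutex_lock(&stopper_lock_demo);
	for (i = 0; i < NR_WORKERS; i++) {
		ids[i] = i;
		pthread_create(&threads[i], NULL, worker, &ids[i]);
	}
	/* All work is queued; now every worker may proceed. */
	pthread_mutex_unlock(&stopper_lock_demo);

	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(threads[i], NULL);

	return 0;
}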