author | Ingo Molnar <mingo@elte.hu> | 2009-07-03 13:29:34 (GMT)
committer | Scott Wood <scottwood@freescale.com> | 2014-05-14 18:38:08 (GMT)
commit | a81e62ef0b3852e26665d916d60c7452d80ca10a (patch)
tree | 645d6acccd9bccdfadd819d9ae7f427079ae7554 /kernel/timer.c
parent | c39360ac3b59843d79d4a76a317b9581c6895757 (diff)
download | linux-fsl-qoriq-a81e62ef0b3852e26665d916d60c7452d80ca10a.tar.xz
timers: prepare for full preemption
When softirqs can be preempted, we need to make sure that cancelling the
timer from the active thread cannot deadlock against a running timer
callback. Add a waitqueue to resolve that.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
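To see why a waitqueue is needed: on PREEMPT_RT the timer callback runs in a preemptible softirq thread, so a higher-priority task spinning in del_timer_sync() with cpu_relax() could keep preempting the thread whose callback it is waiting for. Sleeping until running_timer is cleared avoids that. The sketch below is a userspace analogue of the pattern using pthreads; it is not kernel code, and the names (timer_callback, cancel_sync, running, wait_for_running) are made up for illustration.

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_for_running = PTHREAD_COND_INITIALIZER;
static int running;	/* stand-in for base->running_timer == timer */

/* Stand-in for a timer callback running in a preemptible softirq thread. */
static void *timer_callback(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	running = 1;
	pthread_mutex_unlock(&lock);

	usleep(100 * 1000);	/* pretend the handler does some work */

	pthread_mutex_lock(&lock);
	running = 0;	/* like base->running_timer = NULL */
	pthread_cond_broadcast(&wait_for_running);	/* like wake_up(&base->wait_for_running_timer) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/*
 * Stand-in for del_timer_sync(): sleep until the callback is done
 * instead of busy-waiting with cpu_relax().
 */
static void cancel_sync(void)
{
	pthread_mutex_lock(&lock);
	while (running)	/* like wait_event(..., base->running_timer != timer) */
		pthread_cond_wait(&wait_for_running, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t cb;

	pthread_create(&cb, NULL, timer_callback, NULL);
	usleep(10 * 1000);	/* give the "callback" a chance to start */
	cancel_sync();		/* blocks instead of spinning */
	pthread_join(cb, NULL);
	puts("callback finished, cancellation returned");
	return 0;
}
```

In the patch itself the same roles are played by base->wait_for_running_timer, the wake_up() at the end of __run_timers(), and wait_for_running_timer() called from the del_timer_sync() retry loop.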
Diffstat (limited to 'kernel/timer.c')
-rw-r--r-- | kernel/timer.c | 36
1 file changed, 33 insertions, 3 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index 4296d13..cd92037 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -78,6 +78,7 @@ struct tvec_root {
 struct tvec_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
+	wait_queue_head_t wait_for_running_timer;
 	unsigned long timer_jiffies;
 	unsigned long next_timer;
 	unsigned long active_timers;
@@ -739,12 +740,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	debug_activate(timer, expires);
 
+	preempt_disable_rt();
 	cpu = smp_processor_id();
 
 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
 	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
 		cpu = get_nohz_timer_target();
 #endif
+	preempt_enable_rt();
+
 	new_base = per_cpu(tvec_bases, cpu);
 
 	if (base != new_base) {
@@ -945,6 +949,29 @@ void add_timer_on(struct timer_list *timer, int cpu)
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait for a running timer
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+	struct tvec_base *base = timer->base;
+
+	if (base->running_timer == timer)
+		wait_event(base->wait_for_running_timer,
+			   base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+	cpu_relax();
+}
+
+# define wakeup_timer_waiters(b) do { } while (0)
+#endif
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -1002,7 +1029,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1062,7 +1089,7 @@ int del_timer_sync(struct timer_list *timer)
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
 			return ret;
-		cpu_relax();
+		wait_for_running_timer(timer);
 	}
 }
 EXPORT_SYMBOL(del_timer_sync);
@@ -1179,15 +1206,17 @@ static inline void __run_timers(struct tvec_base *base)
 			if (irqsafe) {
 				spin_unlock(&base->lock);
 				call_timer_fn(timer, fn, data);
+				base->running_timer = NULL;
 				spin_lock(&base->lock);
 			} else {
 				spin_unlock_irq(&base->lock);
 				call_timer_fn(timer, fn, data);
+				base->running_timer = NULL;
 				spin_lock_irq(&base->lock);
 			}
 		}
 	}
-	base->running_timer = NULL;
+	wake_up(&base->wait_for_running_timer);
 	spin_unlock_irq(&base->lock);
 }
 
@@ -1547,6 +1576,7 @@ static int init_timers_cpu(int cpu)
 		base = per_cpu(tvec_bases, cpu);
 	}
 
+	init_waitqueue_head(&base->wait_for_running_timer);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
 		INIT_LIST_HEAD(base->tv4.vec + j);