author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2011-10-05 18:45:18 (GMT)
committer  Scott Wood <scottwood@freescale.com>             2014-05-14 18:38:30 (GMT)
commit     9e20ed4e1caedf12d0cafe3542ce95a937cd11da (patch)
tree       624ab4f90f27b6b511c1e61d2239ce845d76966a /kernel
parent     1bad122a859ae35a5b7cea747121be39d38d6dc8 (diff)
download   linux-fsl-qoriq-9e20ed4e1caedf12d0cafe3542ce95a937cd11da.tar.xz
rcu: Make ksoftirqd do RCU quiescent states
Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
to network-based denial-of-service attacks. This patch therefore
makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq()
is running in ksoftirqd context. A wrapper layer is interposed so that
other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying
function __do_softirq_common() does the actual work.
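To make the call structure concrete, below is a minimal stand-alone C sketch of the wrapper pattern. It is an illustration only: the function names __do_softirq(), __do_softirq_common(), handle_pending_softirqs() and rcu_bh_qs() mirror the patch, but their bodies are stubs, the per-CPU plumbing of the real kernel is omitted, and ksoftirqd_run()/main() are hypothetical drivers added just for the example.

/*
 * Minimal user-space sketch of the wrapper pattern described above.
 * The kernel internals are reduced to stubs; only the call structure
 * mirrors the patch.
 */
#include <stdio.h>

static void rcu_bh_qs(int cpu)
{
	printf("CPU %d: RCU-bh quiescent state reported\n", cpu);
}

static void handle_pending_softirqs(int cpu, int need_rcu_bh_qs)
{
	/* ... run the pending softirq handlers here ... */
	if (need_rcu_bh_qs)		/* only the ksoftirqd path reports a QS */
		rcu_bh_qs(cpu);
}

/* The common worker; the caller says whether a quiescent state is safe. */
static void __do_softirq_common(int cpu, int need_rcu_bh_qs)
{
	handle_pending_softirqs(cpu, need_rcu_bh_qs);
}

/* Wrapper used by all other callers (e.g. local_bh_enable()): no QS. */
static void __do_softirq(int cpu)
{
	__do_softirq_common(cpu, 0);
}

/* The ksoftirqd path calls the common worker directly and allows the QS. */
static void ksoftirqd_run(int cpu)
{
	__do_softirq_common(cpu, 1);
}

int main(void)
{
	__do_softirq(0);	/* non-ksoftirqd context: stays silent */
	ksoftirqd_run(0);	/* ksoftirqd context: reports the QS   */
	return 0;
}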
The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is
that there might be a local_bh_enable() inside an RCU-preempt read-side
critical section. This local_bh_enable() can invoke __do_softirq()
directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just
calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be
an illegal RCU-preempt quiescent state in the middle of an RCU-preempt
read-side critical section. Therefore, quiescent states can only happen
in cases where __do_softirq() is invoked directly from ksoftirqd.
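The hazard can be sketched as follows. The kernel primitives are stubbed out so the fragment compiles on its own, and reader() is a hypothetical caller added for the example; the comments carry the argument from the paragraph above.

/*
 * Stand-alone sketch of the hazardous pattern; not real kernel code.
 */
static void rcu_read_lock(void)    { /* enter RCU-preempt read side */ }
static void rcu_read_unlock(void)  { /* leave RCU-preempt read side */ }
static void local_bh_disable(void) { /* block softirq handling      */ }

static void local_bh_enable(void)
{
	/*
	 * If softirqs became pending meanwhile, the real function may
	 * invoke __do_softirq() right here.  Were __do_softirq() to call
	 * rcu_bh_qs() -- which maps to rcu_preempt_qs() under
	 * PREEMPT_RT_FULL -- a quiescent state would be reported inside
	 * the caller's RCU-preempt read-side critical section.
	 */
}

void reader(void)
{
	rcu_read_lock();	/* RCU-preempt read-side critical section begins */
	local_bh_disable();
	/* ... access RCU-protected data ... */
	local_bh_enable();	/* must NOT imply an RCU-preempt quiescent state */
	/* ... still relying on RCU protection ... */
	rcu_read_unlock();	/* read-side critical section ends */
}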
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c         |  9
-rw-r--r--  kernel/rcutree_plugin.h  |  8
-rw-r--r--  kernel/softirq.c         | 20
3 files changed, 28 insertions, 9 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d52e23b..8104cb2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -199,7 +199,14 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(int cpu);
+
+void rcu_bh_qs(int cpu)
+{
+	rcu_preempt_qs(cpu);
+}
+#else
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index d2da952..63e0520 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1553,7 +1553,7 @@ static void rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1569,6 +1569,9 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu, NULL);
 }
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1666,6 +1669,8 @@ static bool rcu_try_advance_all_cbs(void)
 	return cbs_ready;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 /*
  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
@@ -1704,6 +1709,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
 	}
 	return 0;
 }
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 
 /*
  * Prepare a CPU for idle from an RCU perspective.  The first major task
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 37482da..8692908 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -142,7 +142,7 @@ static void wakeup_softirqd(void)
 		wake_up_process(tsk);
 }
 
-static void handle_pending_softirqs(u32 pending, int cpu)
+static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
 {
 	struct softirq_action *h = softirq_vec;
 	unsigned int prev_count = preempt_count();
@@ -165,7 +165,8 @@ static void handle_pending_softirqs(u32 pending, int cpu)
 			       prev_count, (unsigned int) preempt_count());
 			preempt_count() = prev_count;
 		}
-		rcu_bh_qs(cpu);
+		if (need_rcu_bh_qs)
+			rcu_bh_qs(cpu);
 	}
 	local_irq_disable();
 }
@@ -367,7 +368,7 @@ restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
-	handle_pending_softirqs(pending, cpu);
+	handle_pending_softirqs(pending, cpu, 1);
 
 	pending = local_softirq_pending();
 	if (pending) {
@@ -419,7 +420,12 @@ static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
 static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
 static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
 
-static void __do_softirq(void);
+static void __do_softirq_common(int need_rcu_bh_qs);
+
+void __do_softirq(void)
+{
+	__do_softirq_common(0);
+}
 
 void __init softirq_early_init(void)
 {
@@ -490,7 +496,7 @@ EXPORT_SYMBOL(in_serving_softirq);
  * Called with bh and local interrupts disabled. For full RT cpu must
  * be pinned.
  */
-static void __do_softirq(void)
+static void __do_softirq_common(int need_rcu_bh_qs)
 {
 	u32 pending = local_softirq_pending();
 	int cpu = smp_processor_id();
@@ -504,7 +510,7 @@ static void __do_softirq(void)
 
 	lockdep_softirq_enter();
 
-	handle_pending_softirqs(pending, cpu);
+	handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
 
 	pending = local_softirq_pending();
 	if (pending)
@@ -543,7 +549,7 @@ static int __thread_do_softirq(int cpu)
 	 * schedule!
 	 */
 	if (local_softirq_pending())
-		__do_softirq();
+		__do_softirq_common(cpu >= 0);
 	local_unlock(local_softirq_lock);
 	unpin_current_cpu();
 	preempt_disable();