summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2009-07-03 18:16:38 (GMT)
committerScott Wood <scottwood@freescale.com>2014-04-10 00:19:20 (GMT)
commit0be6e65c2f57d62663e4df7a516686aa421f5f89 (patch)
tree21677e109101d7beb90573dfc397e3dbb20c6217
parent086eb6f6bfee117803e858278dec495424bcefc1 (diff)
downloadlinux-fsl-qoriq-0be6e65c2f57d62663e4df7a516686aa421f5f89.tar.xz
softirq: Sanitize softirq pending for NOHZ/RT
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--kernel/softirq.c63
-rw-r--r--kernel/time/tick-sched.c9
3 files changed, 66 insertions, 8 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9fadfa2..674d90e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -387,6 +387,8 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);
+
/* This is the worklist that queues up per-cpu softirq work.
*
* send_remote_sendirq() adds work to these lists, and
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c647956..a1c156a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -62,6 +62,69 @@ char *softirq_to_name[NR_SOFTIRQS] = {
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
+#ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * On preempt-rt a softirq might be blocked on a lock. There might be
+ * no other runnable task on this CPU because the lock owner runs on
+ * some other CPU. So we have to go into idle with the pending bit
+ * set. Therefore we need to check this otherwise we warn about false
+ * positives which confuses users and defeats the whole purpose of
+ * this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+ static int rate_limit;
+ u32 warnpending = 0, pending;
+
+ if (rate_limit >= 10)
+ return;
+
+ pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+ if (pending) {
+ struct task_struct *tsk;
+
+ tsk = __get_cpu_var(ksoftirqd);
+ /*
+ * The wakeup code in rtmutex.c wakes up the task
+ * _before_ it sets pi_blocked_on to NULL under
+ * tsk->pi_lock. So we need to check for both: state
+ * and pi_blocked_on.
+ */
+ raw_spin_lock(&tsk->pi_lock);
+
+ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
+ warnpending = 1;
+
+ raw_spin_unlock(&tsk->pi_lock);
+ }
+
+ if (warnpending) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ pending);
+ rate_limit++;
+ }
+}
+# else
+/*
+ * On !PREEMPT_RT we just do a rate-limited printk:
+ */
+void softirq_check_pending_idle(void)
+{
+ static int rate_limit;
+
+ if (rate_limit < 10 &&
+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ local_softirq_pending());
+ rate_limit++;
+ }
+}
+# endif
+#endif
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6c77b2d..1f4299b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -724,14 +724,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
- static int ratelimit;
-
- if (ratelimit < 10 &&
- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
- pr_warn("NOHZ: local_softirq_pending %02x\n",
- (unsigned int) local_softirq_pending());
- ratelimit++;
- }
+ softirq_check_pending_idle();
return false;
}