path: root/kernel/irq
author     Scott Wood <scottwood@freescale.com>   2015-02-13 22:12:06 (GMT)
committer  Scott Wood <scottwood@freescale.com>   2015-02-13 22:19:22 (GMT)
commit     6faa2909871d8937cb2f79a10e1b21ffe193fac1 (patch)
tree       f558a94f1553814cc122ab8d9e04c0ebad5262a5 /kernel/irq
parent     fcb2fb84301c673ee15ca04e7a2fc965712d49a0 (diff)
download   linux-fsl-qoriq-6faa2909871d8937cb2f79a10e1b21ffe193fac1.tar.xz
Reset to 3.12.37
Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/handle.c      8
-rw-r--r--   kernel/irq/internals.h   8
-rw-r--r--   kernel/irq/irqdesc.c    52
-rw-r--r--   kernel/irq/manage.c    123
-rw-r--r--   kernel/irq/proc.c       22
-rw-r--r--   kernel/irq/settings.h   12
-rw-r--r--   kernel/irq/spurious.c  114
7 files changed, 197 insertions, 142 deletions
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 7f50c55..131ca17 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -132,8 +132,6 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
- struct pt_regs *regs = get_irq_regs();
- u64 ip = regs ? instruction_pointer(regs) : 0;
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
@@ -174,11 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
-#ifndef CONFIG_PREEMPT_RT_FULL
- add_interrupt_randomness(irq, flags, ip);
-#else
- desc->random_ip = ip;
-#endif
+ add_interrupt_randomness(irq, flags);
if (!noirqdebug)
note_interrupt(irq, desc, retval);
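The hunk above leaves the shared-handler loop intact: every irqaction on the line runs and its return value is OR-ed into retval, which add_interrupt_randomness() and note_interrupt() then consume. Below is a minimal userspace sketch of that accumulation pattern; the types and names are stand-ins for illustration, not the kernel's API.

/* Userspace sketch of the return-value accumulation in
 * handle_irq_event_percpu(); illustrative names only. */
#include <stdio.h>

typedef int irqreturn_t;                 /* stand-in for the kernel type */
enum { IRQ_NONE = 0, IRQ_HANDLED = 1 << 0, IRQ_WAKE_THREAD = 1 << 1 };

struct irqaction {
	irqreturn_t (*handler)(int irq, void *dev_id);
	void *dev_id;
	struct irqaction *next;
};

static irqreturn_t run_chain(int irq, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;

	/* Every handler sharing the line runs; the results are OR-ed so the
	 * caller can tell whether anyone handled it or woke a thread. */
	do {
		retval |= action->handler(irq, action->dev_id);
		action = action->next;
	} while (action);

	return retval;
}

static irqreturn_t noop(int irq, void *dev)   { (void)irq; (void)dev; return IRQ_NONE; }
static irqreturn_t claims(int irq, void *dev) { (void)irq; (void)dev; return IRQ_HANDLED; }

int main(void)
{
	struct irqaction second = { claims, NULL, NULL };
	struct irqaction first  = { noop,   NULL, &second };

	printf("chain result: %d\n", run_chain(16, &first));
	return 0;
}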
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 001fa5b..8a160e8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -74,6 +74,14 @@ extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
+#endif
+
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
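The internals.h hunk follows the usual kernel idiom for optional features: real declarations when CONFIG_SPARSE_IRQ is enabled, empty static inline stubs otherwise, so callers never need their own #ifdefs. A small compilable sketch of the same idiom, using a made-up CONFIG_EXAMPLE_FEATURE:

/* Sketch of the "real function vs. empty inline stub" pattern used for
 * irq_lock_sparse(); CONFIG_EXAMPLE_FEATURE is an invented option. */
#include <stdio.h>

#ifdef CONFIG_EXAMPLE_FEATURE
extern void feature_lock(void);
extern void feature_unlock(void);
#else
/* Compiles away entirely when the feature is off; callers stay #ifdef-free. */
static inline void feature_lock(void) { }
static inline void feature_unlock(void) { }
#endif

int main(void)
{
	feature_lock();
	printf("critical section\n");
	feature_unlock();
	return 0;
}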
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 8ab8e93..07d4551 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -131,6 +131,16 @@ static void free_masks(struct irq_desc *desc)
static inline void free_masks(struct irq_desc *desc) { }
#endif
+void irq_lock_sparse(void)
+{
+ mutex_lock(&sparse_irq_lock);
+}
+
+void irq_unlock_sparse(void)
+{
+ mutex_unlock(&sparse_irq_lock);
+}
+
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
struct irq_desc *desc;
@@ -167,6 +177,12 @@ static void free_desc(unsigned int irq)
unregister_irq_proc(irq, desc);
+ /*
+ * sparse_irq_lock also protects show_interrupts() and
+ * kstat_irqs_usr(). Once we have deleted the descriptor from the
+ * sparse tree we can free it. Accesses from proc will then fail to
+ * look up the descriptor.
+ */
mutex_lock(&sparse_irq_lock);
delete_irq_desc(irq);
mutex_unlock(&sparse_irq_lock);
@@ -489,6 +505,15 @@ void dynamic_irq_cleanup(unsigned int irq)
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+/**
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
+ * @irq: The interrupt number
+ * @cpu: The cpu number
+ *
+ * Returns the sum of interrupt counts on @cpu since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -497,6 +522,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
+/**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq: The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -509,3 +542,22 @@ unsigned int kstat_irqs(unsigned int irq)
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
return sum;
}
+
+/**
+ * kstat_irqs_usr - Get the statistics for an interrupt
+ * @irq: The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. Contrary to kstat_irqs() this can be called from any
+ * preemptible context. It's protected against concurrent removal of
+ * an interrupt descriptor when sparse irqs are enabled.
+ */
+unsigned int kstat_irqs_usr(unsigned int irq)
+{
+ int sum;
+
+ irq_lock_sparse();
+ sum = kstat_irqs(irq);
+ irq_unlock_sparse();
+ return sum;
+}
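kstat_irqs_usr() exists so that preemptible contexts (such as /proc readers) can sum the per-CPU counters while sparse_irq_lock pins the descriptor against concurrent removal. A userspace sketch of that split, with a pthread mutex standing in for sparse_irq_lock and plain arrays standing in for the per-CPU counters and the radix tree:

/* Userspace sketch of the kstat_irqs()/kstat_irqs_usr() split. Names and
 * types are illustrative, not the kernel's. */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct irq_desc {
	unsigned int kstat_irqs[NR_CPUS];   /* stand-in for the percpu counters */
};

static pthread_mutex_t sparse_irq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct irq_desc *descs[32];          /* stand-in for the radix tree */

/* Caller must guarantee the descriptor is not removed concurrently. */
static unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = descs[irq];
	unsigned int cpu, sum = 0;

	if (!desc)
		return 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += desc->kstat_irqs[cpu];
	return sum;
}

/* Safe from any preemptible context: the lock pins the descriptor. */
static unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	pthread_mutex_lock(&sparse_irq_lock);
	sum = kstat_irqs(irq);
	pthread_mutex_unlock(&sparse_irq_lock);
	return sum;
}

int main(void)
{
	static struct irq_desc timer = { { 10, 20, 30, 40 } };

	descs[0] = &timer;
	printf("irq 0: %u\n", kstat_irqs_usr(0));
	return 0;
}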
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 252bf10..75a976a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,7 +22,6 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -31,7 +30,6 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
-# endif
#endif
/**
@@ -152,7 +150,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
- ret = chip->irq_set_affinity(data, mask, false);
+ ret = chip->irq_set_affinity(data, mask, force);
switch (ret) {
case IRQ_SET_MASK_OK:
cpumask_copy(data->affinity, mask);
@@ -164,63 +162,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void _irq_affinity_notify(struct irq_affinity_notify *notify);
-static struct task_struct *set_affinity_helper;
-static LIST_HEAD(affinity_list);
-static DEFINE_RAW_SPINLOCK(affinity_list_lock);
-
-static int set_affinity_thread(void *unused)
-{
- while (1) {
- struct irq_affinity_notify *notify;
- int empty;
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- raw_spin_lock_irq(&affinity_list_lock);
- empty = list_empty(&affinity_list);
- raw_spin_unlock_irq(&affinity_list_lock);
-
- if (empty)
- schedule();
- if (kthread_should_stop())
- break;
- set_current_state(TASK_RUNNING);
-try_next:
- notify = NULL;
-
- raw_spin_lock_irq(&affinity_list_lock);
- if (!list_empty(&affinity_list)) {
- notify = list_first_entry(&affinity_list,
- struct irq_affinity_notify, list);
- list_del_init(&notify->list);
- }
- raw_spin_unlock_irq(&affinity_list_lock);
-
- if (!notify)
- continue;
- _irq_affinity_notify(notify);
- goto try_next;
- }
- return 0;
-}
-
-static void init_helper_thread(void)
-{
- if (set_affinity_helper)
- return;
- set_affinity_helper = kthread_run(set_affinity_thread, NULL,
- "affinity-cb");
- WARN_ON(IS_ERR(set_affinity_helper));
-}
-#else
-
-static inline void init_helper_thread(void) { }
-
-#endif
-
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+ bool force)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct irq_desc *desc = irq_data_to_desc(data);
@@ -230,7 +173,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
- ret = irq_do_set_affinity(data, mask, false);
+ ret = irq_do_set_affinity(data, mask, force);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
@@ -238,30 +181,14 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- raw_spin_lock(&affinity_list_lock);
- if (list_empty(&desc->affinity_notify->list))
- list_add_tail(&affinity_list,
- &desc->affinity_notify->list);
- raw_spin_unlock(&affinity_list_lock);
- wake_up_process(set_affinity_helper);
-#else
schedule_work(&desc->affinity_notify->work);
-#endif
}
irqd_set(data, IRQD_AFFINITY_SET);
return ret;
}
-/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @mask: cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -271,7 +198,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
return -EINVAL;
raw_spin_lock_irqsave(&desc->lock, flags);
- ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+ ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -289,8 +216,10 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+static void irq_affinity_notify(struct work_struct *work)
{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
@@ -312,13 +241,6 @@ out:
kref_put(&notify->kref, notify->release);
}
-static void irq_affinity_notify(struct work_struct *work)
-{
- struct irq_affinity_notify *notify =
- container_of(work, struct irq_affinity_notify, work);
- _irq_affinity_notify(notify);
-}
-
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
@@ -348,8 +270,6 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
- INIT_LIST_HEAD(&notify->list);
- init_helper_thread();
}
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -856,15 +776,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
- /*
- * Interrupts which have real time requirements can be set up
- * to avoid softirq processing in the thread handler. This is
- * safe as these interrupts do not raise soft interrupts.
- */
- if (irq_settings_no_softirq_call(desc))
- _local_bh_enable();
- else
- local_bh_enable();
+ local_bh_enable();
return ret;
}
@@ -944,15 +856,9 @@ static int irq_thread(void *data)
irq_thread_check_affinity(desc, action);
action_ret = handler_fn(desc, action);
- if (!noirqdebug)
- note_interrupt(action->irq, desc, action_ret);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- migrate_disable();
- add_interrupt_randomness(action->irq, 0,
- desc->random_ip ^ (unsigned long) action);
- migrate_enable();
-#endif
+ if (action_ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
wake_threads_waitq(desc);
}
@@ -1215,9 +1121,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
- if (new->flags & IRQF_NO_SOFTIRQ_CALL)
- irq_settings_set_no_softirq_call(desc);
-
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
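The manage.c changes drop the RT helper thread and deliver affinity notifications through a workqueue again: irq_affinity_notify() receives only the embedded work_struct and recovers the enclosing irq_affinity_notify with container_of(). A standalone sketch of that recovery step (illustrative names, no real workqueue):

/* Sketch of the container_of() step in irq_affinity_notify(): the callback
 * gets a pointer to the embedded member and recovers the enclosing struct. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);
};

struct irq_affinity_notify {
	unsigned int irq;
	struct work_struct work;    /* embedded member handed to the callback */
};

static void irq_affinity_notify_cb(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);

	printf("notified for irq %u\n", notify->irq);
}

int main(void)
{
	struct irq_affinity_notify n = { 42, { irq_affinity_notify_cb } };

	/* A real workqueue would invoke this later; call it directly here. */
	n.work.func(&n.work);
	return 0;
}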
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 36f6ee1..095cd72 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -15,6 +15,23 @@
#include "internals.h"
+/*
+ * Access rules:
+ *
+ * procfs protects read/write of /proc/irq/N/ files against a
+ * concurrent free of the interrupt descriptor. remove_proc_entry()
+ * immediately prevents new reads/writes from happening and waits for
+ * already running read/write functions to complete.
+ *
+ * We remove the proc entries first and then delete the interrupt
+ * descriptor from the radix tree and free it. So it is guaranteed
+ * that irq_to_desc(N) is valid as long as the read/writes are
+ * permitted by procfs.
+ *
+ * The read from /proc/interrupts is a different problem because there
+ * is no protection. So the lookup and the access to irqdesc
+ * information must be protected by sparse_irq_lock.
+ */
static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
+ irq_lock_sparse();
desc = irq_to_desc(i);
if (!desc)
- return 0;
+ goto outsparse;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
+outsparse:
+ irq_unlock_sparse();
return 0;
}
#endif
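show_interrupts() now takes sparse_irq_lock before the irq_to_desc() lookup and routes the missing-descriptor case through the same unlock path. A userspace sketch of that lock/lookup/bail-out ordering, with stand-in types:

/* Sketch of the lock/lookup/bail-out ordering used by show_interrupts();
 * a pthread mutex and a tiny lookup function stand in for the kernel bits. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sparse_irq_lock = PTHREAD_MUTEX_INITIALIZER;

struct irq_desc { const char *name; };

static struct irq_desc *irq_to_desc(unsigned int irq)
{
	static struct irq_desc timer = { "timer" };

	return irq == 0 ? &timer : NULL;    /* only irq 0 exists in this sketch */
}

static int show_one(unsigned int irq)
{
	struct irq_desc *desc;

	pthread_mutex_lock(&sparse_irq_lock);
	desc = irq_to_desc(irq);
	if (!desc)
		goto outsparse;             /* missing entries are not an error */

	printf("%u: %s\n", irq, desc->name);
outsparse:
	pthread_mutex_unlock(&sparse_irq_lock);
	return 0;
}

int main(void)
{
	show_one(0);
	show_one(1);
	return 0;
}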
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 0d2c381..1162f10 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,7 +14,6 @@ enum {
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
- _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -27,7 +26,6 @@ enum {
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
-#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -38,16 +36,6 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
-static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
-{
- return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
-}
-
-static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
-{
- desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
-}
-
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
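settings.h keeps every per-descriptor flag in one status word behind small inline accessors, which is why removing _IRQ_NO_SOFTIRQ_CALL only touches the enum, the poison #define and the two helpers. A compilable sketch of that accessor pattern with made-up flags:

/* Sketch of the settings.h accessor pattern: flags in one status word,
 * touched only through inline helpers. Names here are invented. */
#include <stdbool.h>
#include <stdio.h>

enum {
	_IRQ_PER_CPU = 1 << 0,
	_IRQ_NOPROBE = 1 << 1,
};

struct irq_desc {
	unsigned int status_use_accessors;
};

static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
	return desc->status_use_accessors & _IRQ_PER_CPU;
}

static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
{
	desc->status_use_accessors |= _IRQ_PER_CPU;
}

int main(void)
{
	struct irq_desc desc = { 0 };

	irq_settings_set_per_cpu(&desc);
	printf("per-cpu: %d\n", irq_settings_is_per_cpu(&desc));
	return 0;
}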
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e5a309a..febcee3c 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
return action && (action->flags & IRQF_IRQPOLL);
}
+#define SPURIOUS_DEFERRED 0x80000000
+
void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
if (desc->istate & IRQS_POLL_INPROGRESS)
return;
- /* we get here again via the threaded handler */
- if (action_ret == IRQ_WAKE_THREAD)
- return;
-
if (bad_action_ret(action_ret)) {
report_bad_irq(irq, desc, action_ret);
return;
}
+ /*
+ * We cannot call note_interrupt from the threaded handler
+ * because we need to look at the compound of all handlers
+ * (primary and threaded). Aside of that in the threaded
+ * shared case we have no serialization against an incoming
+ * hardware interrupt while we are dealing with a threaded
+ * result.
+ *
+ * So in case a thread is woken, we just note the fact and
+ * defer the analysis to the next hardware interrupt.
+ *
+ * The threaded handlers store whether they successfully
+ * handled an interrupt and we check whether that number
+ * changed versus the last invocation.
+ *
+ * We could handle all interrupts with the delayed-by-one
+ * mechanism, but for the non-forced threaded case we'd just
+ * add pointless overhead to the straight hardirq interrupts
+ * for the sake of a few lines less code.
+ */
+ if (action_ret & IRQ_WAKE_THREAD) {
+ /*
+ * There is a thread woken. Check whether one of the
+ * shared primary handlers returned IRQ_HANDLED. If
+ * not we defer the spurious detection to the next
+ * interrupt.
+ */
+ if (action_ret == IRQ_WAKE_THREAD) {
+ int handled;
+ /*
+ * We use bit 31 of threads_handled_last to
+ * denote that deferred spurious detection is
+ * active. No locking is necessary as
+ * threads_handled_last is only accessed here
+ * and we have the guarantee that hard
+ * interrupts are not reentrant.
+ */
+ if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
+ desc->threads_handled_last |= SPURIOUS_DEFERRED;
+ return;
+ }
+ /*
+ * Check whether one of the threaded handlers
+ * returned IRQ_HANDLED since the last
+ * interrupt happened.
+ *
+ * For simplicity we just set bit 31, as it is
+ * set in threads_handled_last as well. So we
+ * avoid extra masking. And we really do not
+ * care about the high bits of the handled
+ * count. We just care about the count being
+ * different than the one we saw before.
+ */
+ handled = atomic_read(&desc->threads_handled);
+ handled |= SPURIOUS_DEFERRED;
+ if (handled != desc->threads_handled_last) {
+ action_ret = IRQ_HANDLED;
+ /*
+ * Note: We keep the SPURIOUS_DEFERRED
+ * bit set. We are handling the
+ * previous invocation right now.
+ * Keep it for the current one, so the
+ * next hardware interrupt will
+ * account for it.
+ */
+ desc->threads_handled_last = handled;
+ } else {
+ /*
+ * None of the threaded handlers felt
+ * responsible for the last interrupt.
+ *
+ * We keep the SPURIOUS_DEFERRED bit
+ * set in threads_handled_last as we
+ * need to account for the current
+ * interrupt as well.
+ */
+ action_ret = IRQ_NONE;
+ }
+ } else {
+ /*
+ * One of the primary handlers returned
+ * IRQ_HANDLED. So we don't care about the
+ * threaded handlers on the same line. Clear
+ * the deferred detection bit.
+ *
+ * In theory we could/should check whether the
+ * deferred bit is set and take the result of
+ * the previous run into account here as
+ * well. But it's really not worth the
+ * trouble. If every other interrupt is
+ * handled we never trigger the spurious
+ * detector. And if this is just the one out
+ * of 100k unhandled ones which is handled
+ * then we merrily delay the spurious detection
+ * by one hard interrupt. Not a real problem.
+ */
+ desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
+ }
+ }
+
if (unlikely(action_ret == IRQ_NONE)) {
/*
* If we are seeing only the odd spurious IRQ caused by
@@ -340,10 +438,6 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
-#ifdef CONFIG_PREEMPT_RT_BASE
- pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
- return 1;
-#endif
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -356,10 +450,6 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
-#ifdef CONFIG_PREEMPT_RT_BASE
- pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
- return 1;
-#endif
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");