Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--  include/linux/interrupt.h  |  53
1 file changed, 24 insertions(+), 29 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 62aa4f8..f58a0cf 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -8,9 +8,12 @@
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -223,35 +226,6 @@ static inline int disable_irq_wake(unsigned int irq)
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif
-/*
- * Temporary defines for UP kernels, until all code gets fixed.
- */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
-{
- local_irq_disable();
-}
-static inline void __deprecated sti(void)
-{
- local_irq_enable();
-}
-static inline void __deprecated save_flags(unsigned long *x)
-{
- local_save_flags(*x);
-}
-#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
-{
- local_irq_restore(x);
-}
-
-static inline void __deprecated save_and_cli(unsigned long *x)
-{
- local_irq_save(*x);
-}
-#define save_and_cli(x) save_and_cli(&x)
-#endif /* CONFIG_SMP */
-
/* Some architectures might implement lazy enabling/disabling of
* interrupts. In some cases, such as stop_machine, we might want
* to ensure that after a local_irq_disable(), interrupts have
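The hunk above deletes the long-deprecated UP-only wrappers; each body spells out its portable replacement: cli() is local_irq_disable(), sti() is local_irq_enable(), save_and_cli() is local_irq_save(), and restore_flags() is local_irq_restore(). A minimal before/after sketch of a hypothetical caller (not part of this patch):

	unsigned long flags;

	/* Old UP-only style, removed by this patch: */
	save_and_cli(flags);		/* save flags, disable interrupts */
	/* ... critical section ... */
	restore_flags(flags);

	/* Portable replacement, correct on UP and SMP alike: */
	local_irq_save(flags);		/* save flags, disable interrupts */
	/* ... critical section ... */
	local_irq_restore(flags);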
@@ -281,6 +255,8 @@ enum
HRTIMER_SOFTIRQ,
#endif
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
+
+ NR_SOFTIRQS
};
/* softirq mask and active fields moved to irq_cpustat_t in
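NR_SOFTIRQS is added as a sentinel after the last real entry, so it evaluates to the number of softirq types and lets per-softirq arrays be sized at compile time; the per-cpu worklist declared in the next hunk relies on it. A sketch of the pattern (the entries between HI_SOFTIRQ and RCU_SOFTIRQ are elided here):

	enum
	{
		HI_SOFTIRQ = 0,
		/* ... TIMER_SOFTIRQ through RCU_SOFTIRQ ... */

		NR_SOFTIRQS	/* count of entries above, not a real softirq */
	};

	/* One slot per softirq type, sized by the sentinel: */
	static struct list_head pending_work[NR_SOFTIRQS];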
@@ -300,6 +276,25 @@ extern void softirq_init(void);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them. The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu. If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+ int this_cpu, int softirq);
/* Tasklets --- multithreaded analogue of BHs.
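Together these declarations form the new remote-softirq API: the caller embeds a struct call_single_data in its work item, and send_remote_softirq() queues it on the target cpu's softirq_work_list for the given softirq, falling back to the local cpu when the remote kick cannot be done. A hedged usage sketch; struct my_work, its fields, and the choice of BLOCK_SOFTIRQ are illustrative assumptions, not mandated by this header:

	#include <linux/interrupt.h>
	#include <linux/smp.h>

	struct my_work {
		struct call_single_data csd;	/* linkage used by the softirq core */
		int payload;			/* hypothetical per-item data */
	};

	static void my_queue_to_cpu(struct my_work *w, int cpu)
	{
		/* The plain variant manages local irq state itself. */
		send_remote_softirq(&w->csd, cpu, BLOCK_SOFTIRQ);
	}

	static void my_queue_to_cpu_irqs_off(struct my_work *w, int cpu)
	{
		/* Per the comment above, the __ variant is for callers that
		 * already run with local interrupts disabled and can supply
		 * their own cpu number. */
		__send_remote_softirq(&w->csd, cpu, smp_processor_id(),
				      BLOCK_SOFTIRQ);
	}

The BLOCK_SOFTIRQ handler on the target cpu would then dequeue from its local softirq_work_list[BLOCK_SOFTIRQ], with interrupts disabled, as the worklist comment requires.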