Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c  |  184
1 file changed, 151 insertions(+), 33 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025cc..d7d498d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
* Distribute under GPLv2.
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ * Remote softirq infrastructure is by Jens Axboe.
*/
#include <linux/export.h>
@@ -27,6 +29,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
+#include <asm/irq.h>
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
@@ -97,13 +100,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
raw_local_irq_save(flags);
/*
- * The preempt tracer hooks into preempt_count_add and will break
+ * The preempt tracer hooks into add_preempt_count and will break
* lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
* is set and before current->softirq_enabled is cleared.
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
- __preempt_count_add(cnt);
+ preempt_count() += cnt;
/*
* Were softirqs turned off above:
*/
@@ -117,7 +120,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
- preempt_count_add(cnt);
+ add_preempt_count(cnt);
barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -131,11 +134,12 @@ EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
+ WARN_ON_ONCE(in_irq());
WARN_ON_ONCE(!irqs_disabled());
if (softirq_count() == cnt)
trace_softirqs_on(_RET_IP_);
- preempt_count_sub(cnt);
+ sub_preempt_count(cnt);
}
/*
@@ -145,7 +149,6 @@ static void __local_bh_enable(unsigned int cnt)
*/
void _local_bh_enable(void)
{
- WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
@@ -166,17 +169,12 @@ static inline void _local_bh_enable_ip(unsigned long ip)
* Keep preemption disabled until we are done with
* softirq processing:
*/
- preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+ sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
- if (unlikely(!in_interrupt() && local_softirq_pending())) {
- /*
- * Run softirq if any pending. And do it in its own stack
- * as we may be calling this deep in a task call stack already.
- */
+ if (unlikely(!in_interrupt() && local_softirq_pending()))
do_softirq();
- }
- preempt_count_dec();
+ dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_enable();
#endif
@@ -258,7 +256,7 @@ restart:
" exited with %08x?\n", vec_nr,
softirq_to_name[vec_nr], h->action,
prev_count, preempt_count());
- preempt_count_set(prev_count);
+ preempt_count() = prev_count;
}
rcu_bh_qs(cpu);
@@ -282,11 +280,10 @@ restart:
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
- WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
-
+#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void)
{
@@ -301,11 +298,13 @@ asmlinkage void do_softirq(void)
pending = local_softirq_pending();
if (pending)
- do_softirq_own_stack();
+ __do_softirq();
local_irq_restore(flags);
}
+#endif
+
/*
* Enter an interrupt context.
*/
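For reference, the hunk above folds the generic do_softirq() back under #ifndef __ARCH_HAS_DO_SOFTIRQ, so an architecture that defines that symbol provides its own entry point, typically one that switches to a dedicated softirq stack. A minimal sketch of what such an arch override might look like, assuming a hypothetical call_on_softirq_stack() helper that is not part of this patch:

/*
 * Hypothetical arch-provided do_softirq() for the __ARCH_HAS_DO_SOFTIRQ
 * case.  call_on_softirq_stack() is an assumed helper that runs its
 * argument on a per-CPU softirq stack; it does not exist in this patch.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (local_softirq_pending())
		call_on_softirq_stack(__do_softirq);
	local_irq_restore(flags);
}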
@@ -330,21 +329,15 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
if (!force_irqthreads) {
-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
* We can safely execute softirq on the current stack if
* it is the irq stack, because it should be near empty
- * at this stage.
+ * at this stage. But we have no way to know whether the arch
+ * calls irq_exit() on the irq stack. So call softirq
+ * in its own stack to prevent any overrun on top
+ * of a potentially deep task stack.
*/
- __do_softirq();
-#else
- /*
- * Otherwise, irq_exit() is called on the task stack that can
- * be potentially deep already. So call softirq in its own stack
- * to prevent from any overrun.
- */
- do_softirq_own_stack();
-#endif
+ do_softirq();
} else {
wakeup_softirqd();
}
@@ -376,7 +369,7 @@ void irq_exit(void)
account_irq_exit_time(current);
trace_hardirq_exit();
- preempt_count_sub(HARDIRQ_OFFSET);
+ sub_preempt_count(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
@@ -625,17 +618,146 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+/*
+ * Remote softirq bits
+ */
+
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+ struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+ list_add_tail(&cp->list, head);
+
+ /* Trigger the softirq only if the list was previously empty. */
+ if (head->next == &cp->list)
+ raise_softirq_irqoff(softirq);
+}
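As an aside, __local_trigger() above raises the softirq only when the list was previously empty, so a single raise drains any number of queued entries. A hypothetical consumer, not part of this patch, might drain its per-CPU softirq_work_list like this (EXAMPLE_SOFTIRQ and the handler name are assumptions for illustration):

/*
 * Hypothetical softirq action draining softirq_work_list[EXAMPLE_SOFTIRQ].
 * Entries are spliced off with interrupts disabled, then processed with
 * interrupts enabled.
 */
static void example_softirq_action(struct softirq_action *h)
{
	struct list_head *head;
	LIST_HEAD(local);

	local_irq_disable();
	head = &__get_cpu_var(softirq_work_list[EXAMPLE_SOFTIRQ]);
	list_splice_init(head, &local);
	local_irq_enable();

	while (!list_empty(&local)) {
		struct call_single_data *cp;

		cp = list_entry(local.next, struct call_single_data, list);
		list_del_init(&cp->list);
		/* complete the work described by cp here */
	}
}

Such a handler would be registered once with open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action).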
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+ struct call_single_data *cp = data;
+ unsigned long flags;
+ int softirq;
+
+ softirq = *(int *)cp->info;
+ local_irq_save(flags);
+ __local_trigger(cp, softirq);
+ local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+ if (cpu_online(cpu)) {
+ cp->func = remote_softirq_receive;
+ cp->info = &softirq;
+ cp->flags = 0;
+
+ __smp_call_function_single(cpu, cp, 0);
+ return 0;
+ }
+ return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+ return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu. If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+ if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+ __local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+ unsigned long flags;
+ int this_cpu;
+
+ local_irq_save(flags);
+ this_cpu = smp_processor_id();
+ __send_remote_softirq(cp, cpu, this_cpu, softirq);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
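A caller-side sketch, again not part of this patch: a driver that wants completions handled on the submitting CPU can embed a call_single_data in its request and hand it to send_remote_softirq(). struct my_request, my_complete_request() and MY_SOFTIRQ are assumptions for illustration; the embedded csd must stay live until the softirq handler has consumed it.

/* Hypothetical request structure carrying its own call_single_data. */
struct my_request {
	struct call_single_data csd;
	int submit_cpu;		/* CPU that issued the request */
	/* ... payload ... */
};

/*
 * Queue completion processing for @rq on the CPU that submitted it.
 * If that CPU is this CPU or is offline, the work is triggered locally.
 */
static void my_complete_request(struct my_request *rq)
{
	send_remote_softirq(&rq->csd, rq->submit_cpu, MY_SOFTIRQ);
}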
+
+static int remote_softirq_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+ * and trigger a run of the softirq
+ */
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+ int cpu = (unsigned long) hcpu;
+ int i;
+
+ local_irq_disable();
+ for (i = 0; i < NR_SOFTIRQS; i++) {
+ struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+ struct list_head *local_head;
+
+ if (list_empty(head))
+ continue;
+
+ local_head = &__get_cpu_var(softirq_work_list[i]);
+ list_splice_init(head, local_head);
+ raise_softirq_irqoff(i);
+ }
+ local_irq_enable();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block remote_softirq_cpu_notifier = {
+ .notifier_call = remote_softirq_cpu_notify,
+};
+
void __init softirq_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
+ int i;
+
per_cpu(tasklet_vec, cpu).tail =
&per_cpu(tasklet_vec, cpu).head;
per_cpu(tasklet_hi_vec, cpu).tail =
&per_cpu(tasklet_hi_vec, cpu).head;
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
}
+ register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
@@ -649,10 +771,6 @@ static void run_ksoftirqd(unsigned int cpu)
{
local_irq_disable();
if (local_softirq_pending()) {
- /*
- * We can safely run softirq on inline stack, as we are not deep
- * in the task stack here.
- */
__do_softirq();
rcu_note_context_switch(cpu);
local_irq_enable();