Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c	742
1 file changed, 135 insertions(+), 607 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 15ad603..d7d498d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,12 +21,10 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
-#include <linux/delay.h>
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/smpboot.h>
 #include <linux/tick.h>
-#include <linux/locallock.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
@@ -64,98 +62,6 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
-#ifdef CONFIG_NO_HZ_COMMON
-# ifdef CONFIG_PREEMPT_RT_FULL
-
-struct softirq_runner {
-	struct task_struct *runner[NR_SOFTIRQS];
-};
-
-static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
-
-static inline void softirq_set_runner(unsigned int sirq)
-{
-	struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
-
-	sr->runner[sirq] = current;
-}
-
-static inline void softirq_clr_runner(unsigned int sirq)
-{
-	struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
-
-	sr->runner[sirq] = NULL;
-}
-
-/*
- * On preempt-rt a softirq running context might be blocked on a
- * lock. There might be no other runnable task on this CPU because the
- * lock owner runs on some other CPU. So we have to go into idle with
- * the pending bit set. Therefor we need to check this otherwise we
- * warn about false positives which confuses users and defeats the
- * whole purpose of this test.
- *
- * This code is called with interrupts disabled.
- */
-void softirq_check_pending_idle(void)
-{
-	static int rate_limit;
-	struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
-	u32 warnpending;
-	int i;
-
-	if (rate_limit >= 10)
-		return;
-
-	warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
-	for (i = 0; i < NR_SOFTIRQS; i++) {
-		struct task_struct *tsk = sr->runner[i];
-
-		/*
-		 * The wakeup code in rtmutex.c wakes up the task
-		 * _before_ it sets pi_blocked_on to NULL under
-		 * tsk->pi_lock. So we need to check for both: state
-		 * and pi_blocked_on.
-		 */
-		if (tsk) {
-			raw_spin_lock(&tsk->pi_lock);
-			if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
-				/* Clear all bits pending in that task */
-				warnpending &= ~(tsk->softirqs_raised);
-				warnpending &= ~(1 << i);
-			}
-			raw_spin_unlock(&tsk->pi_lock);
-		}
-	}
-
-	if (warnpending) {
-		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-		       warnpending);
-		rate_limit++;
-	}
-}
-# else
-/*
- * On !PREEMPT_RT we just printk rate limited:
- */
-void softirq_check_pending_idle(void)
-{
-	static int rate_limit;
-
-	if (rate_limit < 10 &&
-	    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-		       local_softirq_pending());
-		rate_limit++;
-	}
-}
-# endif
-
-#else /* !CONFIG_NO_HZ_COMMON */
-static inline void softirq_set_runner(unsigned int sirq) { }
-static inline void softirq_clr_runner(unsigned int sirq) { }
-#endif
-
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
@@ -171,57 +77,6 @@ static void wakeup_softirqd(void)
 		wake_up_process(tsk);
 }
 
-static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs)
-{
-	struct softirq_action *h = softirq_vec + vec_nr;
-	unsigned int prev_count = preempt_count();
-
-	kstat_incr_softirqs_this_cpu(vec_nr);
-	trace_softirq_entry(vec_nr);
-	h->action(h);
-	trace_softirq_exit(vec_nr);
-
-	if (unlikely(prev_count != preempt_count())) {
-		pr_err("softirq %u %s %p preempt count leak: %08x -> %08x\n",
-		       vec_nr, softirq_to_name[vec_nr], h->action,
-		       prev_count, (unsigned int) preempt_count());
-		preempt_count() = prev_count;
-	}
-	if (need_rcu_bh_qs)
-		rcu_bh_qs(cpu);
-}
-
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline int ksoftirqd_softirq_pending(void)
-{
-	return local_softirq_pending();
-}
-
-static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
-{
-	unsigned int vec_nr;
-
-	local_irq_enable();
-	for (vec_nr = 0; pending; vec_nr++, pending >>= 1) {
-		if (pending & 1)
-			handle_softirq(vec_nr, cpu, need_rcu_bh_qs);
-	}
-	local_irq_disable();
-}
-
-static void run_ksoftirqd(unsigned int cpu)
-{
-	local_irq_disable();
-	if (ksoftirqd_softirq_pending()) {
-		__do_softirq();
-		rcu_note_context_switch(cpu);
-		local_irq_enable();
-		cond_resched();
-		return;
-	}
-	local_irq_enable();
-}
-
 /*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -354,51 +209,14 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
 #define MAX_SOFTIRQ_RESTART 10
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * Convoluted means of passing __do_softirq() a message through the various
- * architecture execute_on_stack() bits.
- *
- * When we run softirqs from irq_exit() and thus on the hardirq stack we need
- * to keep the lockdep irq context tracking as tight as possible in order to
- * not miss-qualify lock contexts and miss possible deadlocks.
- */
-static DEFINE_PER_CPU(int, softirq_from_hardirq);
-
-static inline void lockdep_softirq_from_hardirq(void)
-{
-	this_cpu_write(softirq_from_hardirq, 1);
-}
-
-static inline void lockdep_softirq_start(void)
-{
-	if (this_cpu_read(softirq_from_hardirq))
-		trace_hardirq_exit();
-	lockdep_softirq_enter();
-}
-
-static inline void lockdep_softirq_end(void)
-{
-	lockdep_softirq_exit();
-	if (this_cpu_read(softirq_from_hardirq)) {
-		this_cpu_write(softirq_from_hardirq, 0);
-		trace_hardirq_enter();
-	}
-}
-
-#else
-static inline void lockdep_softirq_from_hardirq(void) { }
-static inline void lockdep_softirq_start(void) { }
-static inline void lockdep_softirq_end(void) { }
-#endif
-
 asmlinkage void __do_softirq(void)
 {
+	struct softirq_action *h;
+	__u32 pending;
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+	int cpu;
 	unsigned long old_flags = current->flags;
 	int max_restart = MAX_SOFTIRQ_RESTART;
-	__u32 pending;
-	int cpu;
 
 	/*
 	 * Mask out PF_MEMALLOC s current task context is borrowed for the
@@ -411,14 +229,43 @@ asmlinkage void __do_softirq(void)
 	account_irq_enter_time(current);
 
 	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
-	lockdep_softirq_start();
+	lockdep_softirq_enter();
 
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
-	handle_pending_softirqs(pending, cpu, 1);
+	local_irq_enable();
+
+	h = softirq_vec;
+
+	do {
+		if (pending & 1) {
+			unsigned int vec_nr = h - softirq_vec;
+			int prev_count = preempt_count();
+
+			kstat_incr_softirqs_this_cpu(vec_nr);
+
+			trace_softirq_entry(vec_nr);
+			h->action(h);
+			trace_softirq_exit(vec_nr);
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %u %s %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", vec_nr,
+				       softirq_to_name[vec_nr], h->action,
+				       prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
+			rcu_bh_qs(cpu);
+		}
+		h++;
+		pending >>= 1;
+	} while (pending);
+
+	local_irq_disable();
 
 	pending = local_softirq_pending();
 	if (pending) {
@@ -429,7 +276,8 @@ restart:
 		wakeup_softirqd();
 	}
 
-	lockdep_softirq_end();
+	lockdep_softirq_exit();
+
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
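The hunk above restores the open-coded pending-bitmask walk in __do_softirq(): the per-CPU pending word is cleared first, each set bit runs its handler with interrupts enabled, and anything raised in the meantime either triggers another pass or is handed to ksoftirqd. Below is a minimal userspace C sketch of that shape; fake_pending, vec and the two toy handlers are illustrative stand-ins, not kernel API, and the real loop also bails out on MAX_SOFTIRQ_TIME and need_resched(), which the sketch omits.

#include <stdint.h>
#include <stdio.h>

#define NR_SOFTIRQS         10
#define MAX_SOFTIRQ_RESTART 10

static uint32_t fake_pending;           /* stands in for local_softirq_pending() */

static void net_rx(void)  { printf("NET_RX handler\n"); }
static void tasklet(void) { printf("TASKLET handler\n"); fake_pending |= 1u << 3; }

/* handler table indexed by softirq number, like softirq_vec[] */
static void (*vec[NR_SOFTIRQS])(void) = { [3] = net_rx, [6] = tasklet };

int main(void)
{
	int max_restart = MAX_SOFTIRQ_RESTART;
	uint32_t pending = fake_pending = 1u << 6;      /* raise the "TASKLET" bit */

restart:
	fake_pending = 0;                       /* set_softirq_pending(0) */
	for (unsigned int nr = 0; pending; nr++, pending >>= 1)
		if ((pending & 1) && vec[nr])
			vec[nr]();              /* h->action(h) */

	pending = fake_pending;                 /* raised while handlers ran? */
	if (pending && --max_restart)
		goto restart;                   /* take another bounded pass */
	if (pending)
		printf("wakeup_softirqd()\n");  /* still busy: defer to the thread */
	return 0;
}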
@@ -458,261 +306,6 @@ asmlinkage void do_softirq(void)
 
 #endif
 
 /*
- * This function must run with irqs disabled!
- */
-void raise_softirq_irqoff(unsigned int nr)
-{
-	__raise_softirq_irqoff(nr);
-
-	/*
-	 * If we're in an interrupt or softirq, we're done
-	 * (this also catches softirq-disabled code). We will
-	 * actually run the softirq once we return from
-	 * the irq or softirq.
-	 *
-	 * Otherwise we wake up ksoftirqd to make sure we
-	 * schedule the softirq soon.
-	 */
-	if (!in_interrupt())
-		wakeup_softirqd();
-}
-
-void __raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-}
-
-static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-static void ksoftirqd_set_sched_params(unsigned int cpu) { }
-static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
-
-#else /* !PREEMPT_RT_FULL */
-
-/*
- * On RT we serialize softirq execution with a cpu local lock per softirq
- */
-static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
-
-void __init softirq_early_init(void)
-{
-	int i;
-
-	for (i = 0; i < NR_SOFTIRQS; i++)
-		local_irq_lock_init(local_softirq_locks[i]);
-}
-
-static void lock_softirq(int which)
-{
-	local_lock(local_softirq_locks[which]);
-}
-
-static void unlock_softirq(int which)
-{
-	local_unlock(local_softirq_locks[which]);
-}
-
-static void do_single_softirq(int which, int need_rcu_bh_qs)
-{
-	unsigned long old_flags = current->flags;
-
-	current->flags &= ~PF_MEMALLOC;
-	vtime_account_irq_enter(current);
-	current->flags |= PF_IN_SOFTIRQ;
-	lockdep_softirq_enter();
-	local_irq_enable();
-	handle_softirq(which, smp_processor_id(), need_rcu_bh_qs);
-	local_irq_disable();
-	lockdep_softirq_exit();
-	current->flags &= ~PF_IN_SOFTIRQ;
-	vtime_account_irq_enter(current);
-	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
-}
-
-/*
- * Called with interrupts disabled. Process softirqs which were raised
- * in current context (or on behalf of ksoftirqd).
- */
-static void do_current_softirqs(int need_rcu_bh_qs)
-{
-	while (current->softirqs_raised) {
-		int i = __ffs(current->softirqs_raised);
-		unsigned int pending, mask = (1U << i);
-
-		current->softirqs_raised &= ~mask;
-		local_irq_enable();
-
-		/*
-		 * If the lock is contended, we boost the owner to
-		 * process the softirq or leave the critical section
-		 * now.
-		 */
-		lock_softirq(i);
-		local_irq_disable();
-		softirq_set_runner(i);
-		/*
-		 * Check with the local_softirq_pending() bits,
-		 * whether we need to process this still or if someone
-		 * else took care of it.
-		 */
-		pending = local_softirq_pending();
-		if (pending & mask) {
-			set_softirq_pending(pending & ~mask);
-			do_single_softirq(i, need_rcu_bh_qs);
-		}
-		softirq_clr_runner(i);
-		unlock_softirq(i);
-		WARN_ON(current->softirq_nestcnt != 1);
-	}
-}
-
-void local_bh_disable(void)
-{
-	if (++current->softirq_nestcnt == 1)
-		migrate_disable();
-}
-EXPORT_SYMBOL(local_bh_disable);
-
-void local_bh_enable(void)
-{
-	if (WARN_ON(current->softirq_nestcnt == 0))
-		return;
-
-	local_irq_disable();
-	if (current->softirq_nestcnt == 1 && current->softirqs_raised)
-		do_current_softirqs(1);
-	local_irq_enable();
-
-	if (--current->softirq_nestcnt == 0)
-		migrate_enable();
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
-	local_bh_enable();
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
-
-void _local_bh_enable(void)
-{
-	if (WARN_ON(current->softirq_nestcnt == 0))
-		return;
-	if (--current->softirq_nestcnt == 0)
-		migrate_enable();
-}
-EXPORT_SYMBOL(_local_bh_enable);
-
-int in_serving_softirq(void)
-{
-	return current->flags & PF_IN_SOFTIRQ;
-}
-EXPORT_SYMBOL(in_serving_softirq);
-
-/* Called with preemption disabled */
-static void run_ksoftirqd(unsigned int cpu)
-{
-	local_irq_disable();
-	current->softirq_nestcnt++;
-	do_current_softirqs(1);
-	current->softirq_nestcnt--;
-	rcu_note_context_switch(cpu);
-	local_irq_enable();
-}
-
-/*
- * Called from netif_rx_ni(). Preemption enabled, but migration
- * disabled. So the cpu can't go away under us.
- */
-void thread_do_softirq(void)
-{
-	if (!in_serving_softirq() && current->softirqs_raised) {
-		current->softirq_nestcnt++;
-		do_current_softirqs(0);
-		current->softirq_nestcnt--;
-	}
-}
-
-static void do_raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-
-	/*
-	 * If we are not in a hard interrupt and inside a bh disabled
-	 * region, we simply raise the flag on current. local_bh_enable()
-	 * will make sure that the softirq is executed. Otherwise we
-	 * delegate it to ksoftirqd.
-	 */
-	if (!in_irq() && current->softirq_nestcnt)
-		current->softirqs_raised |= (1U << nr);
-	else if (__this_cpu_read(ksoftirqd))
-		__this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
-}
-
-void __raise_softirq_irqoff(unsigned int nr)
-{
-	do_raise_softirq_irqoff(nr);
-	if (!in_irq() && !current->softirq_nestcnt)
-		wakeup_softirqd();
-}
-
-/*
- * This function must run with irqs disabled!
- */
-void raise_softirq_irqoff(unsigned int nr)
-{
-	do_raise_softirq_irqoff(nr);
-
-	/*
-	 * If we're in an hard interrupt we let irq return code deal
-	 * with the wakeup of ksoftirqd.
-	 */
-	if (in_irq())
-		return;
-
-	/*
-	 * If we are in thread context but outside of a bh disabled
-	 * region, we need to wake ksoftirqd as well.
-	 *
-	 * CHECKME: Some of the places which do that could be wrapped
-	 * into local_bh_disable/enable pairs. Though it's unclear
-	 * whether this is worth the effort. To find those places just
-	 * raise a WARN() if the condition is met.
-	 */
-	if (!current->softirq_nestcnt)
-		wakeup_softirqd();
-}
-
-static inline int ksoftirqd_softirq_pending(void)
-{
-	return current->softirqs_raised;
-}
-
-static inline void local_bh_disable_nort(void) { }
-static inline void _local_bh_enable_nort(void) { }
-
-static inline void ksoftirqd_set_sched_params(unsigned int cpu)
-{
-	struct sched_param param = { .sched_priority = 1 };
-
-	sched_setscheduler(current, SCHED_FIFO, &param);
-	/* Take over all pending softirqs when starting */
-	local_irq_disable();
-	current->softirqs_raised = local_softirq_pending();
-	local_irq_enable();
-}
-
-static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
-{
-	struct sched_param param = { .sched_priority = 0 };
-
-	sched_setscheduler(current, SCHED_NORMAL, &param);
-}
-
-#endif /* PREEMPT_RT_FULL */
-/*
  * Enter an interrupt context.
  */
 void irq_enter(void)
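The block removed above carried the PREEMPT_RT_FULL raise path: a softirq raised inside a local_bh_disable() region is queued on the raising task and processed by local_bh_enable(), while everything else is delegated to the ksoftirqd thread. A hedged userspace model of just that decision, with an illustrative struct in place of the task_struct fields:

#include <stdbool.h>
#include <stdio.h>

struct task {
	unsigned int softirq_nestcnt;   /* local_bh_disable() nesting depth */
	unsigned int softirqs_raised;   /* per-task pending bits */
};

static struct task me, ksoftirqd_thread;

static void rt_raise(unsigned int nr, bool in_hardirq)
{
	if (!in_hardirq && me.softirq_nestcnt) {
		/* bh-disabled region: local_bh_enable() will process it */
		me.softirqs_raised |= 1u << nr;
		printf("softirq %u raised on current\n", nr);
	} else {
		/* otherwise hand the work to the ksoftirqd thread */
		ksoftirqd_thread.softirqs_raised |= 1u << nr;
		printf("softirq %u delegated to ksoftirqd\n", nr);
	}
}

int main(void)
{
	me.softirq_nestcnt = 1;         /* as if under local_bh_disable() */
	rt_raise(3, false);             /* raised on current */
	me.softirq_nestcnt = 0;
	rt_raise(3, true);              /* from hard irq: goes to ksoftirqd */
	return 0;
}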
@@ -725,9 +318,9 @@ void irq_enter(void)
 		 * Prevent raise_softirq from needlessly waking up ksoftirqd
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
-		local_bh_disable_nort();
+		local_bh_disable();
 		tick_check_idle(cpu);
-		_local_bh_enable_nort();
+		_local_bh_enable();
 	}
 
 	__irq_enter();
@@ -735,9 +328,7 @@ static inline void invoke_softirq(void)
 {
-#ifndef CONFIG_PREEMPT_RT_FULL
 	if (!force_irqthreads) {
-		lockdep_softirq_from_hardirq();
 		/*
 		 * We can safely execute softirq on the current stack if
 		 * it is the irq stack, because it should be near empty
@@ -750,15 +341,6 @@ static inline void invoke_softirq(void)
 	} else {
 		wakeup_softirqd();
 	}
-#else /* PREEMPT_RT_FULL */
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (__this_cpu_read(ksoftirqd) &&
-	    __this_cpu_read(ksoftirqd)->softirqs_raised)
-		wakeup_softirqd();
-	local_irq_restore(flags);
-#endif
 }
 
 static inline void tick_irq_exit(void)
@@ -786,13 +368,33 @@ void irq_exit(void)
 #endif
 
 	account_irq_exit_time(current);
+	trace_hardirq_exit();
 	sub_preempt_count(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
 	tick_irq_exit();
 	rcu_irq_exit();
-	trace_hardirq_exit(); /* must be last! */
-}
+}
+
+/*
+ * This function must run with irqs disabled!
+ */
+inline void raise_softirq_irqoff(unsigned int nr)
+{
+	__raise_softirq_irqoff(nr);
+
+	/*
+	 * If we're in an interrupt or softirq, we're done
+	 * (this also catches softirq-disabled code). We will
+	 * actually run the softirq once we return from
+	 * the irq or softirq.
+	 *
+	 * Otherwise we wake up ksoftirqd to make sure we
+	 * schedule the softirq soon.
+	 */
+	if (!in_interrupt())
+		wakeup_softirqd();
 }
 
 void raise_softirq(unsigned int nr)
@@ -804,6 +406,12 @@ void raise_softirq(unsigned int nr)
 	local_irq_restore(flags);
 }
 
+void __raise_softirq_irqoff(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	or_softirq_pending(1UL << nr);
+}
+
 void open_softirq(int nr, void (*action)(struct softirq_action *))
 {
 	softirq_vec[nr].action = action;
 }
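The restored raise_softirq_irqoff() keys off in_interrupt(), which is a mask test on preempt_count(): in hard-irq or softirq context the pending bit will be serviced on the way out, otherwise ksoftirqd must be woken. A small sketch of that test; the bit layout below mimics the kernel's HARDIRQ/SOFTIRQ count fields but is illustrative only:

#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define HARDIRQ_SHIFT	16
#define SOFTIRQ_MASK	(0xffu << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(0x3ffu << HARDIRQ_SHIFT)

static unsigned int preempt_count;	/* stand-in for the real per-task counter */

static int in_interrupt(void)
{
	return preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK);
}

static void raise_softirq_irqoff(unsigned int nr)
{
	printf("raise %u: %s\n", nr,
	       in_interrupt() ? "handled on irq return" : "wake ksoftirqd");
}

int main(void)
{
	raise_softirq_irqoff(2);			/* task context: wake */
	preempt_count |= 1u << HARDIRQ_SHIFT;		/* pretend we are in an irq */
	raise_softirq_irqoff(2);			/* serviced on irq exit */
	return 0;
}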
@@ -821,45 +429,15 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
-static void inline
-__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
-{
-	if (tasklet_trylock(t)) {
-again:
-		/* We may have been preempted before tasklet_trylock
-		 * and __tasklet_action may have already run.
-		 * So double check the sched bit while the takslet
-		 * is locked before adding it to the list.
-		 */
-		if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
-			t->next = NULL;
-			*head->tail = t;
-			head->tail = &(t->next);
-			raise_softirq_irqoff(nr);
-			tasklet_unlock(t);
-		} else {
-			/* This is subtle. If we hit the corner case above
-			 * It is possible that we get preempted right here,
-			 * and another task has successfully called
-			 * tasklet_schedule(), then this function, and
-			 * failed on the trylock. Thus we must be sure
-			 * before releasing the tasklet lock, that the
-			 * SCHED_BIT is clear. Otherwise the tasklet
-			 * may get its SCHED_BIT set, but not added to the
-			 * list
-			 */
-			if (!tasklet_tryunlock(t))
-				goto again;
-		}
-	}
-}
-
 void __tasklet_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
+	t->next = NULL;
+	*__this_cpu_read(tasklet_vec.tail) = t;
+	__this_cpu_write(tasklet_vec.tail, &(t->next));
+	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
 
@@ -870,7 +448,10 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
+	t->next = NULL;
+	*__this_cpu_read(tasklet_hi_vec.tail) = t;
+	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
 
@@ -878,117 +459,48 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
-	__tasklet_hi_schedule(t);
-}
+	BUG_ON(!irqs_disabled());
 
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
-void tasklet_enable(struct tasklet_struct *t)
-{
-	if (!atomic_dec_and_test(&t->count))
-		return;
-	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
-		tasklet_schedule(t);
+	t->next = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, t);
+	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
-EXPORT_SYMBOL(tasklet_enable);
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-void tasklet_hi_enable(struct tasklet_struct *t)
+static void tasklet_action(struct softirq_action *a)
 {
-	if (!atomic_dec_and_test(&t->count))
-		return;
-	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
-		tasklet_hi_schedule(t);
-}
-
-EXPORT_SYMBOL(tasklet_hi_enable);
+	struct tasklet_struct *list;
 
-static void
-__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
-{
-	int loops = 1000000;
+	local_irq_disable();
+	list = __this_cpu_read(tasklet_vec.head);
+	__this_cpu_write(tasklet_vec.head, NULL);
+	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+	local_irq_enable();
 
 	while (list) {
 		struct tasklet_struct *t = list;
 
 		list = list->next;
 
-		/*
-		 * Should always succeed - after a tasklist got on the
-		 * list (after getting the SCHED bit set from 0 to 1),
-		 * nothing but the tasklet softirq it got queued to can
-		 * lock it:
-		 */
-		if (!tasklet_trylock(t)) {
-			WARN_ON(1);
-			continue;
-		}
-
-		t->next = NULL;
-
-		/*
-		 * If we cannot handle the tasklet because it's disabled,
-		 * mark it as pending. tasklet_enable() will later
-		 * re-schedule the tasklet.
-		 */
-		if (unlikely(atomic_read(&t->count))) {
-out_disabled:
-			/* implicit unlock: */
-			wmb();
-			t->state = TASKLET_STATEF_PENDING;
-			continue;
-		}
-
-		/*
-		 * After this point on the tasklet might be rescheduled
-		 * on another CPU, but it can only be added to another
-		 * CPU's tasklet list if we unlock the tasklet (which we
-		 * dont do yet).
-		 */
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-			WARN_ON(1);
-
-again:
-		t->func(t->data);
-
-		/*
-		 * Try to unlock the tasklet. We must use cmpxchg, because
-		 * another CPU might have scheduled or disabled the tasklet.
-		 * We only allow the STATE_RUN -> 0 transition here.
-		 */
-		while (!tasklet_tryunlock(t)) {
-			/*
-			 * If it got disabled meanwhile, bail out:
-			 */
-			if (atomic_read(&t->count))
-				goto out_disabled;
-			/*
-			 * If it got scheduled meanwhile, re-execute
-			 * the tasklet function:
-			 */
-			if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-				goto again;
-			if (!--loops) {
-				printk("hm, tasklet state: %08lx\n", t->state);
-				WARN_ON(1);
+		if (tasklet_trylock(t)) {
+			if (!atomic_read(&t->count)) {
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+					BUG();
+				t->func(t->data);
 				tasklet_unlock(t);
-				break;
+				continue;
 			}
+			tasklet_unlock(t);
 		}
-	}
-}
-
-static void tasklet_action(struct softirq_action *a)
-{
-	struct tasklet_struct *list;
 
-	local_irq_disable();
-	list = __get_cpu_var(tasklet_vec).head;
-	__get_cpu_var(tasklet_vec).head = NULL;
-	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
-	local_irq_enable();
-
-	__tasklet_action(a, list);
+		local_irq_disable();
+		t->next = NULL;
+		*__this_cpu_read(tasklet_vec.tail) = t;
+		__this_cpu_write(tasklet_vec.tail, &(t->next));
+		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
+		local_irq_enable();
+	}
 }
 
 static void tasklet_hi_action(struct softirq_action *a)
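The rewritten tasklet_action() above reverts to the mainline protocol: detach the whole per-CPU list with interrupts off, then for each tasklet try to take the RUN bit, run it only if it is enabled and still has SCHED set, and otherwise requeue it and re-raise the softirq. A self-contained C11 model of that per-tasklet decision, with atomics standing in for the kernel's state bits (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SCHED = 1u << 0, RUN = 1u << 1 };	/* TASKLET_STATE_* stand-ins */

struct tasklet {
	_Atomic unsigned int state;
	atomic_int count;			/* non-zero means disabled */
	void (*func)(unsigned long);
	unsigned long data;
};

static bool trylock(struct tasklet *t)
{	/* like tasklet_trylock(): atomically set RUN, fail if already set */
	return !(atomic_fetch_or(&t->state, RUN) & RUN);
}

static void unlock(struct tasklet *t)
{
	atomic_fetch_and(&t->state, ~RUN);
}

/* Mirrors the restored loop body: run if we own RUN and the tasklet is
 * enabled; otherwise leave it for a requeue pass. */
static bool handle_one(struct tasklet *t)
{
	if (trylock(t)) {
		if (atomic_load(&t->count) == 0) {
			/* SCHED must still be set here, or scheduling broke */
			if (!(atomic_fetch_and(&t->state, ~SCHED) & SCHED))
				printf("BUG()\n");
			t->func(t->data);
			unlock(t);
			return true;		/* ran: do not requeue */
		}
		unlock(t);			/* got disabled meanwhile */
	}
	return false;				/* requeue and re-raise softirq */
}

static void hello(unsigned long d) { printf("tasklet ran, data=%lu\n", d); }

int main(void)
{
	struct tasklet t = { .state = SCHED, .func = hello, .data = 42 };

	printf("ran=%d\n", handle_one(&t));	/* runs */
	atomic_store(&t.count, 1);		/* tasklet_disable() effect */
	atomic_fetch_or(&t.state, SCHED);	/* scheduled again */
	printf("ran=%d\n", handle_one(&t));	/* requeued instead */
	return 0;
}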
@@ -1001,7 +513,29 @@ static void tasklet_hi_action(struct softirq_action *a)
 	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
 	local_irq_enable();
 
-	__tasklet_action(a, list);
+	while (list) {
+		struct tasklet_struct *t = list;
+
+		list = list->next;
+
+		if (tasklet_trylock(t)) {
+			if (!atomic_read(&t->count)) {
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+					BUG();
+				t->func(t->data);
+				tasklet_unlock(t);
+				continue;
+			}
+			tasklet_unlock(t);
+		}
+
+		local_irq_disable();
+		t->next = NULL;
+		*__this_cpu_read(tasklet_hi_vec.tail) = t;
+		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+		__raise_softirq_irqoff(HI_SOFTIRQ);
+		local_irq_enable();
+	}
 }
 
@@ -1024,7 +558,7 @@ void tasklet_kill(struct tasklet_struct *t)
 
 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
 		do {
-			msleep(1);
+			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
 	tasklet_unlock_wait(t);
@@ -1228,26 +762,22 @@ void __init softirq_init(void)
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-void tasklet_unlock_wait(struct tasklet_struct *t)
+static int ksoftirqd_should_run(unsigned int cpu)
 {
-	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
-		/*
-		 * Hack for now to avoid this busy-loop:
-		 */
-#ifdef CONFIG_PREEMPT_RT_FULL
-		msleep(1);
-#else
-		barrier();
-#endif
-	}
+	return local_softirq_pending();
 }
-EXPORT_SYMBOL(tasklet_unlock_wait);
-#endif
 
-static int ksoftirqd_should_run(unsigned int cpu)
+static void run_ksoftirqd(unsigned int cpu)
 {
-	return ksoftirqd_softirq_pending();
+	local_irq_disable();
+	if (local_softirq_pending()) {
+		__do_softirq();
+		rcu_note_context_switch(cpu);
+		local_irq_enable();
+		cond_resched();
+		return;
+	}
+	local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1330,8 +860,6 @@ static struct notifier_block cpu_nfb = {
 
 static struct smp_hotplug_thread softirq_threads = {
 	.store			= &ksoftirqd,
-	.setup			= ksoftirqd_set_sched_params,
-	.cleanup		= ksoftirqd_clr_sched_params,
 	.thread_should_run	= ksoftirqd_should_run,
 	.thread_fn		= run_ksoftirqd,
 	.thread_comm		= "ksoftirqd/%u",
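For context on the final hunk: with the RT setup/cleanup hooks gone, softirq_threads is back to the plain smpboot contract, in which the registered per-CPU thread alternates thread_should_run() and thread_fn() and sleeps when there is no pending work. A toy single-threaded model of that contract (should_run, thread_fn and fake_pending are illustrative stand-ins, not the smpboot API):

#include <stdio.h>

static unsigned int fake_pending = 1;	/* stands in for local_softirq_pending() */

static int should_run(unsigned int cpu)
{
	return fake_pending;		/* mirrors ksoftirqd_should_run() */
}

static void thread_fn(unsigned int cpu)
{
	printf("cpu%u: __do_softirq()\n", cpu);	/* mirrors run_ksoftirqd() */
	fake_pending = 0;
}

int main(void)
{
	/* smpboot_thread_fn() essentially loops over these two calls,
	 * sleeping whenever should_run() returns false. */
	for (int i = 0; i < 3; i++) {
		if (should_run(0))
			thread_fn(0);
		else
			printf("cpu0: sleep until woken\n");
	}
	return 0;
}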