author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2014-01-31 13:20:31 (GMT)
committer Scott Wood <scottwood@freescale.com>  2015-02-13 22:20:57 (GMT)
commit    66ac3feebc0fa881fc4b9d03110bd257536852e7 (patch)
tree      59f131743c457e418f24975692ed06817407231d /kernel
parent    ef6bcccdb8ae11d181029a5e2a866140fdc4f1ce (diff)
irq_work: allow certain work in hard irq context
irq_work is processed in softirq context on -RT because we want to avoid long latencies which might arise from processing lots of perf events.

The noHZ-full mode requires its callback to be called from real hardirq context (commit 76c24fb ("nohz: New APIs to re-evaluate the tick on full dynticks CPUs")). If it is called from a thread context we might get wrong results for checks like "is_idle_task(current)".

This patch introduces a second list (hirq_work_list) which will be used if irq_work_run() has been invoked from hardirq context, and processes only work items marked with IRQ_WORK_HARD_IRQ.

This patch also removes arch_irq_work_raise() from sparc & powerpc like it is already done for x86. At least for powerpc it is somehow superfluous because it is called from the timer interrupt which should invoke update_process_times().

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
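For illustration, a minimal sketch of how a work item opts into hard-irq processing under this scheme, modeled on the nohz_full_kick_work change below. The names my_hardirq_func, my_hardirq_work and kick_me are hypothetical; IRQ_WORK_HARD_IRQ, DEFINE_PER_CPU, __get_cpu_var and irq_work_queue() come from the patched tree:

#include <linux/irq_work.h>
#include <linux/percpu.h>

/* Callback must be safe to run from real hardirq context. */
static void my_hardirq_func(struct irq_work *work)
{
	/* e.g. re-evaluate per-CPU state that is only valid in hardirq,
	 * such as is_idle_task(current) checks done by noHZ-full. */
}

/* Flagged items are queued to hirq_work_list on -RT and run from
 * the hardirq invocation of irq_work_run(), not the softirq thread. */
static DEFINE_PER_CPU(struct irq_work, my_hardirq_work) = {
	.func	= my_hardirq_func,
	.flags	= IRQ_WORK_HARD_IRQ,
};

/* Queue on the local CPU, e.g. from a timer or perf hot path. */
static void kick_me(void)
{
	irq_work_queue(&__get_cpu_var(my_hardirq_work));
}

Unflagged work items keep the -RT default and are still processed in softirq context, so only work that genuinely needs hardirq semantics should set the flag.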
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq_work.c         | 22
-rw-r--r--  kernel/time/tick-sched.c  |  1
-rw-r--r--  kernel/timer.c            |  2
3 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index f6e4377..35d21f9 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -20,6 +20,9 @@
static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_PER_CPU(struct llist_head, hirq_work_list);
+#endif
static DEFINE_PER_CPU(int, irq_work_raised);
/*
@@ -48,7 +51,11 @@ static bool irq_work_claim(struct irq_work *work)
return true;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+void arch_irq_work_raise(void)
+#else
void __weak arch_irq_work_raise(void)
+#endif
{
/*
* Lame architectures will get the timer tick callback
@@ -70,8 +77,12 @@ void irq_work_queue(struct irq_work *work)
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
- llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (work->flags & IRQ_WORK_HARD_IRQ)
+ llist_add(&work->llnode, &__get_cpu_var(hirq_work_list));
+ else
+#endif
+ llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
/*
* If the work is not "lazy" or the tick is stopped, raise the irq
* work interrupt (if supported by the arch), otherwise, just wait
@@ -115,7 +126,12 @@ static void __irq_work_run(void)
__this_cpu_write(irq_work_raised, 0);
barrier();
- this_list = &__get_cpu_var(irq_work_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (in_irq())
+ this_list = &__get_cpu_var(hirq_work_list);
+ else
+#endif
+ this_list = &__get_cpu_var(irq_work_list);
if (llist_empty(this_list))
return;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index fa4c28b..7e5c94b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -221,6 +221,7 @@ static void nohz_full_kick_work_func(struct irq_work *work)
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_work_func,
+ .flags = IRQ_WORK_HARD_IRQ,
};
/*
diff --git a/kernel/timer.c b/kernel/timer.c
index a85ac8f..611a597 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1425,7 +1425,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_IRQ_WORK)
if (in_irq())
irq_work_run();
#endif