author     Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit     86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree       f99d2906b0eafca507f37289e68052fc105cc2dc /kernel/rcutree_plugin.h
parent     07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download   linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz
Reset to 3.12.19
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  |  178
1 file changed, 130 insertions(+), 48 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c849bd4..511e6b4 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,6 +24,12 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
+#include "time/tick-internal.h"
+
#define RCU_KTHREAD_PRIO 1
#ifdef CONFIG_RCU_BOOST
@@ -353,7 +359,7 @@ void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block. */
- if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+ if (in_irq() || in_serving_softirq()) {
local_irq_restore(flags);
return;
}
@@ -650,6 +656,15 @@ static void rcu_preempt_check_callbacks(int cpu)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+ rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
@@ -1111,19 +1126,6 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-/*
- * If boosting, set rcuc kthreads to realtime priority.
- */
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
-#ifdef CONFIG_RCU_BOOST
- struct sched_param sp;
-
- sp.sched_priority = RCU_KTHREAD_PRIO;
- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-#endif /* #ifdef CONFIG_RCU_BOOST */
-}
-
#ifdef CONFIG_RCU_BOOST
#include "rtmutex_common.h"
@@ -1155,6 +1157,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+ /*
+ * If the thread is yielding, only wake it when this
+ * is invoked from idle
+ */
+ if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+ wake_up_process(t);
+}
+
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1298,6 +1310,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __this_cpu_write(rcu_cpu_has_work, 1);
+ if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+ current != __this_cpu_read(rcu_cpu_kthread_task)) {
+ rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+ __this_cpu_read(rcu_cpu_kthread_status));
+ }
+ local_irq_restore(flags);
+}
+
+/*
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
@@ -1351,6 +1380,67 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}
+static void rcu_kthread_do_work(void)
+{
+ rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+ rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+ rcu_preempt_do_callbacks();
+}
+
+static void rcu_cpu_kthread_setup(unsigned int cpu)
+{
+ struct sched_param sp;
+
+ sp.sched_priority = RCU_KTHREAD_PRIO;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+}
+
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+ return __get_cpu_var(rcu_cpu_has_work);
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
+ unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+ char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+ int spincnt;
+
+ for (spincnt = 0; spincnt < 10; spincnt++) {
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+ local_bh_disable();
+ *statusp = RCU_KTHREAD_RUNNING;
+ this_cpu_inc(rcu_cpu_kthread_loops);
+ local_irq_disable();
+ work = *workp;
+ *workp = 0;
+ local_irq_enable();
+ if (work)
+ rcu_kthread_do_work();
+ local_bh_enable();
+ if (*workp == 0) {
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+ *statusp = RCU_KTHREAD_WAITING;
+ return;
+ }
+ }
+ *statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+ schedule_timeout_interruptible(2);
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+ *statusp = RCU_KTHREAD_WAITING;
+}
+
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
@@ -1384,14 +1474,27 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
free_cpumask_var(cm);
}
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+ .store = &rcu_cpu_kthread_task,
+ .thread_should_run = rcu_cpu_kthread_should_run,
+ .thread_fn = rcu_cpu_kthread,
+ .thread_comm = "rcuc/%u",
+ .setup = rcu_cpu_kthread_setup,
+ .park = rcu_cpu_kthread_park,
+};
+
/*
* Spawn all kthreads -- called as soon as the scheduler is running.
*/
static int __init rcu_spawn_kthreads(void)
{
struct rcu_node *rnp;
+ int cpu;
rcu_scheduler_fully_active = 1;
+ for_each_possible_cpu(cpu)
+ per_cpu(rcu_cpu_has_work, cpu) = 0;
+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
rnp = rcu_get_root(rcu_state);
(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
if (NUM_RCU_NODES > 1) {
@@ -1419,6 +1522,11 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
+static void invoke_rcu_callbacks_kthread(void)
+{
+ WARN_ON_ONCE(1);
+}
+
static bool rcu_is_callbacks_kthread(void)
{
return false;
@@ -1445,7 +1553,7 @@ static void rcu_prepare_kthreads(int cpu)
#endif /* #else #ifdef CONFIG_RCU_BOOST */
-#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Check to see if any future RCU-related work will need to be done
@@ -1461,9 +1569,6 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
*delta_jiffies = ULONG_MAX;
return rcu_cpu_has_callbacks(cpu, NULL);
}
-#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
-
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1561,8 +1666,6 @@ static bool rcu_try_advance_all_cbs(void)
return cbs_ready;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
@@ -1601,7 +1704,6 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
}
return 0;
}
-#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
/*
* Prepare a CPU for idle from an RCU perspective. The first major task
@@ -1959,7 +2061,7 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
*/
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
- swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
}
/*
@@ -1977,8 +2079,8 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
static void rcu_init_one_nocb(struct rcu_node *rnp)
{
- init_swait_head(&rnp->nocb_gp_wq[0]);
- init_swait_head(&rnp->nocb_gp_wq[1]);
+ init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+ init_waitqueue_head(&rnp->nocb_gp_wq[1]);
}
/* Is the specified CPU a no-CPUs CPU? */
@@ -2018,7 +2120,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
return;
len = atomic_long_read(&rdp->nocb_q_count);
if (old_rhpp == &rdp->nocb_head) {
- swait_wake(&rdp->nocb_wq); /* ... only if queue was empty ... */
+ wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
rdp->qlen_last_fqs_check = 0;
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
wake_up_process(t); /* ... or if many callbacks queued. */
@@ -2108,7 +2210,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
*/
trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
- swait_event_interruptible(
+ wait_event_interruptible(
rnp->nocb_gp_wq[c & 0x1],
(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
if (likely(d))
@@ -2136,7 +2238,7 @@ static int rcu_nocb_kthread(void *arg)
for (;;) {
/* If not polling, wait for next batch of callbacks. */
if (!rcu_nocb_poll)
- swait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+ wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
list = ACCESS_ONCE(rdp->nocb_head);
if (!list) {
schedule_timeout_interruptible(1);
@@ -2186,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
rdp->nocb_tail = &rdp->nocb_head;
- init_swait_head(&rdp->nocb_wq);
+ init_waitqueue_head(&rdp->nocb_wq);
}
/* Create a kthread for each RCU flavor for each no-CBs CPU. */
@@ -2701,23 +2803,3 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
}
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-/*
- * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
- * grace-period kthread will do force_quiescent_state() processing?
- * The idea is to avoid waking up RCU core processing on such a
- * CPU unless the grace period has extended for too long.
- *
- * This code relies on the fact that all NO_HZ_FULL CPUs are also
- * CONFIG_RCU_NOCB_CPUs.
- */
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_cpu(smp_processor_id()) &&
- (!rcu_gp_in_progress(rsp) ||
- ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
- return 1;
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
- return 0;
-}