author    Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit    86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree      f99d2906b0eafca507f37289e68052fc105cc2dc /kernel/rcutree.c
parent    07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download  linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz
Reset to 3.12.19
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 152
1 file changed, 20 insertions, 132 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 507fab1..32618b3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -55,11 +55,6 @@
#include <linux/random.h>
#include <linux/ftrace_event.h>
#include <linux/suspend.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
-#include <linux/smpboot.h>
-#include "time/tick-internal.h"
#include "rcutree.h"
#include <trace/events/rcu.h>
@@ -150,6 +145,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;
+#ifdef CONFIG_RCU_BOOST
+
/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
@@ -159,6 +156,8 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -200,19 +199,6 @@ void rcu_sched_qs(int cpu)
rdp->passed_quiesce = 1;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void rcu_preempt_qs(int cpu);
-
-void rcu_bh_qs(int cpu)
-{
- unsigned long flags;
-
- /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
- local_irq_save(flags);
- rcu_preempt_qs(cpu);
- local_irq_restore(flags);
-}
-#else
void rcu_bh_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -221,7 +207,6 @@ void rcu_bh_qs(int cpu)
trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
}
-#endif
/*
* Note a context switch. This is a quiescent state for RCU-sched,
@@ -278,7 +263,6 @@ long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Return the number of RCU BH batches processed thus far for debug & stats.
*/
@@ -296,7 +280,6 @@ void rcu_bh_force_quiescent_state(void)
force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-#endif
/*
* Record the number of times rcutorture tests have been initiated and
@@ -1488,7 +1471,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
/* Handle grace-period start. */
for (;;) {
- swait_event_interruptible(rsp->gp_wq,
+ wait_event_interruptible(rsp->gp_wq,
rsp->gp_flags &
RCU_GP_FLAG_INIT);
if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
@@ -1507,7 +1490,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
}
for (;;) {
rsp->jiffies_force_qs = jiffies + j;
- ret = swait_event_interruptible_timeout(rsp->gp_wq,
+ ret = wait_event_interruptible_timeout(rsp->gp_wq,
(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
(!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp)),
@@ -1545,7 +1528,7 @@ static void rsp_wakeup(struct irq_work *work)
struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
/* Wake up rcu_gp_kthread() to start the grace period. */
- swait_wake(&rsp->gp_wq);
+ wake_up(&rsp->gp_wq);
}
/*
@@ -1619,7 +1602,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
{
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
- swait_wake(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
+ wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
}
/*
@@ -2189,8 +2172,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
}
rsp->gp_flags |= RCU_GP_FLAG_FQS;
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
- /* Memory barrier implied by wake_up() path. */
- swait_wake(&rsp->gp_wq);
+ wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
}
/*
@@ -2227,14 +2209,16 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
{
struct rcu_state *rsp;
if (cpu_is_offline(smp_processor_id()))
return;
+ trace_rcu_utilization(TPS("Start RCU core"));
for_each_rcu_flavor(rsp)
__rcu_process_callbacks(rsp);
+ trace_rcu_utilization(TPS("End RCU core"));
}
/*
@@ -2248,105 +2232,18 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
return;
- rcu_do_batch(rsp, rdp);
-}
-
-static void rcu_wake_cond(struct task_struct *t, int status)
-{
- /*
- * If the thread is yielding, only wake it when this
- * is invoked from idle
- */
- if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
- wake_up_process(t);
-}
-
-/*
- * Wake up this CPU's rcuc kthread to do RCU core processing.
- */
-static void invoke_rcu_core(void)
-{
- unsigned long flags;
- struct task_struct *t;
-
- if (!cpu_online(smp_processor_id()))
+ if (likely(!rsp->boost)) {
+ rcu_do_batch(rsp, rdp);
return;
- local_irq_save(flags);
- __this_cpu_write(rcu_cpu_has_work, 1);
- t = __this_cpu_read(rcu_cpu_kthread_task);
- if (t != NULL && current != t)
- rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
- local_irq_restore(flags);
-}
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
- return __this_cpu_read(rcu_cpu_has_work);
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
- unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
- char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
- int spincnt;
-
- for (spincnt = 0; spincnt < 10; spincnt++) {
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
- local_bh_disable();
- *statusp = RCU_KTHREAD_RUNNING;
- this_cpu_inc(rcu_cpu_kthread_loops);
- local_irq_disable();
- work = *workp;
- *workp = 0;
- local_irq_enable();
- if (work)
- rcu_process_callbacks();
- local_bh_enable();
- if (*workp == 0) {
- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
- *statusp = RCU_KTHREAD_WAITING;
- return;
- }
}
- *statusp = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
- schedule_timeout_interruptible(2);
- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
- *statusp = RCU_KTHREAD_WAITING;
+ invoke_rcu_callbacks_kthread();
}
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
- .store = &rcu_cpu_kthread_task,
- .thread_should_run = rcu_cpu_kthread_should_run,
- .thread_fn = rcu_cpu_kthread,
- .thread_comm = "rcuc/%u",
- .setup = rcu_cpu_kthread_setup,
- .park = rcu_cpu_kthread_park,
-};
-
-/*
- * Spawn per-CPU RCU core processing kthreads.
- */
-static int __init rcu_spawn_core_kthreads(void)
+static void invoke_rcu_core(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(rcu_cpu_has_work, cpu) = 0;
- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- return 0;
+ if (cpu_online(smp_processor_id()))
+ raise_softirq(RCU_SOFTIRQ);
}
-early_initcall(rcu_spawn_core_kthreads);
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
@@ -2476,7 +2373,6 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
@@ -2485,7 +2381,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
-#endif
/*
* Because a context switch is a grace period for RCU-sched and RCU-bh,
@@ -2563,7 +2458,6 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
@@ -2590,7 +2484,6 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-#endif
static int synchronize_sched_expedited_cpu_stop(void *data)
{
@@ -2765,10 +2658,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Check for CPU stalls, if enabled. */
check_cpu_stall(rsp, rdp);
- /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
- if (rcu_nohz_full_cpu(rsp))
- return 0;
-
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
rdp->qs_pending && !rdp->passed_quiesce) {
@@ -3002,7 +2891,6 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
@@ -3011,7 +2899,6 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-#endif
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
@@ -3315,7 +3202,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
}
rsp->rda = rda;
- init_swait_head(&rsp->gp_wq);
+ init_waitqueue_head(&rsp->gp_wq);
init_irq_work(&rsp->wakeup_work, rsp_wakeup);
rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
@@ -3411,6 +3298,7 @@ void __init rcu_init(void)
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
* We don't need protection against CPU-hotplug here because