author      Ingo Molnar <mingo@kernel.org>    2013-04-21 09:04:42 (GMT)
committer   Ingo Molnar <mingo@kernel.org>    2013-04-21 09:04:42 (GMT)
commit      2727872dfe5d273f313f8a0c0dd0fcc58e96cde7 (patch)
tree        069ceb12d981f33d997afd214c2082a6685a2fd0
parent      65d798f0f9339ae2c4ebe9480e3260b33382a584 (diff)
parent      f98823ac758ba1aa77c6e3f8ad4ef3ad84ee0a7c (diff)
download    linux-fsl-qoriq-2727872dfe5d273f313f8a0c0dd0fcc58e96cde7.tar.xz
Merge branch 'timers/nohz-reviewed' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz
Pull full dynticks timekeeping and RCU improvements from Frederic Weisbecker.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--    Documentation/kernel-parameters.txt    6
-rw-r--r--    include/linux/rcupdate.h               7
-rw-r--r--    include/linux/tick.h                   2
-rw-r--r--    init/main.c                            1
-rw-r--r--    kernel/rcutree.c                       6
-rw-r--r--    kernel/rcutree.h                       1
-rw-r--r--    kernel/rcutree_plugin.h                13
-rw-r--r--    kernel/time/Kconfig                   10
-rw-r--r--    kernel/time/tick-sched.c              77
9 files changed, 72 insertions, 51 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 82365dd..4865e9b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1916,8 +1916,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nohz_full= [KNL,BOOT]
In kernels built with CONFIG_NO_HZ_FULL=y, set
the specified list of CPUs whose tick will be stopped
- whenever possible. You need to keep at least one online
- CPU outside the range to maintain the timekeeping.
+ whenever possible. The boot CPU will be forced outside
+ the range to maintain the timekeeping.
+ The CPUs in this range must also be included in the
+ rcu_nocbs= set.
noiotrap [SH] Disables trapped I/O port accesses.
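Illustrative example, not part of the patch: given the requirement documented above, keeping CPUs 1-7 tick-free on a hypothetical 8-CPU machine needs both ranges on the kernel command line, e.g.

    nohz_full=1-7 rcu_nocbs=1-7

CPU 0 (the boot CPU) is deliberately left out of the range, since it is forced to keep the timekeeping duty anyway.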
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b758ce1..8e0948c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -999,4 +999,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+#ifdef CONFIG_RCU_NOCB_CPU
+extern bool rcu_is_nocb_cpu(int cpu);
+#else
+static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
+
#endif /* __LINUX_RCUPDATE_H */
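Rough sketch, not part of the patch: the newly exported rcu_is_nocb_cpu() predicate is meant to be called from outside the RCU core, the same way the tick_nohz_init() change later in this diff uses it; a hypothetical caller could look like

    /* Hypothetical helper: reject CPUs whose RCU callbacks are not offloaded. */
    static bool example_cpu_can_go_tickless(int cpu)
    {
            return rcu_is_nocb_cpu(cpu);  /* inline stub returns false when CONFIG_RCU_NOCB_CPU=n */
    }

With CONFIG_RCU_NOCB_CPU=n the static inline stub always returns false, so such callers simply treat every CPU as unsuitable for full dynticks without needing their own #ifdefs.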
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b4e3b0c..0b6873c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -158,8 +158,10 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
+extern void tick_nohz_init(void);
extern int tick_nohz_full_cpu(int cpu);
#else
+static inline void tick_nohz_init(void) { }
static inline int tick_nohz_full_cpu(int cpu) { return 0; }
#endif
diff --git a/init/main.c b/init/main.c
index 63534a1..2acb5bb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -547,6 +547,7 @@ asmlinkage void __init start_kernel(void)
idr_init_cache();
perf_event_init();
rcu_init();
+ tick_nohz_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f5ab502..1d4ceff 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1695,7 +1695,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
struct rcu_node *rnp, struct rcu_data *rdp)
{
/* No-CBs CPUs do not have orphanable callbacks. */
- if (is_nocb_cpu(rdp->cpu))
+ if (rcu_is_nocb_cpu(rdp->cpu))
return;
/*
@@ -2757,10 +2757,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
* corresponding CPU's preceding callbacks have been invoked.
*/
for_each_possible_cpu(cpu) {
- if (!cpu_online(cpu) && !is_nocb_cpu(cpu))
+ if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
continue;
rdp = per_cpu_ptr(rsp->rda, cpu);
- if (is_nocb_cpu(cpu)) {
+ if (rcu_is_nocb_cpu(cpu)) {
_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
rsp->n_barrier_done);
atomic_inc(&rsp->barrier_cpu_count);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index f993c0a..38acc49 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -529,7 +529,6 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
-static bool is_nocb_cpu(int cpu);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index a5745e9..0cd91cc 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2167,7 +2167,7 @@ static int __init parse_rcu_nocb_poll(char *arg)
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
/* Is the specified CPU a no-CPUs CPU? */
-static bool is_nocb_cpu(int cpu)
+bool rcu_is_nocb_cpu(int cpu)
{
if (have_rcu_nocb_mask)
return cpumask_test_cpu(cpu, rcu_nocb_mask);
@@ -2225,7 +2225,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy)
{
- if (!is_nocb_cpu(rdp->cpu))
+ if (!rcu_is_nocb_cpu(rdp->cpu))
return 0;
__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
return 1;
@@ -2242,7 +2242,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
long qll = rsp->qlen_lazy;
/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
- if (!is_nocb_cpu(smp_processor_id()))
+ if (!rcu_is_nocb_cpu(smp_processor_id()))
return 0;
rsp->qlen = 0;
rsp->qlen_lazy = 0;
@@ -2282,7 +2282,7 @@ static bool nocb_cpu_expendable(int cpu)
* If there are no no-CB CPUs or if this CPU is not a no-CB CPU,
* then offlining this CPU is harmless. Let it happen.
*/
- if (!have_rcu_nocb_mask || is_nocb_cpu(cpu))
+ if (!have_rcu_nocb_mask || rcu_is_nocb_cpu(cpu))
return 1;
/* If no memory, play it safe and keep the CPU around. */
@@ -2464,11 +2464,6 @@ static void __init rcu_init_nocb(void)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static bool is_nocb_cpu(int cpu)
-{
- return false;
-}
-
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy)
{
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 358d601..99c3f13 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -128,6 +128,16 @@ config NO_HZ_FULL
endchoice
+config NO_HZ_FULL_ALL
+ bool "Full dynticks system on all CPUs by default"
+ depends on NO_HZ_FULL
+ help
+ If the user doesn't pass the nohz_full boot option to
+ define the range of full dynticks CPUs, consider that all
+ CPUs in the system are full dynticks by default.
+ Note the boot CPU will still be kept outside the range to
+ handle the timekeeping duty.
+
config NO_HZ
bool "Old Idle dynticks config"
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
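Hypothetical configuration sketch, not taken from the patch: enabling the new option on top of the full dynticks mode would leave a .config fragment roughly like

    CONFIG_NO_HZ_FULL=y
    CONFIG_NO_HZ_FULL_ALL=y
    CONFIG_RCU_NOCB_CPU=y

CONFIG_RCU_NOCB_CPU=y appears here because, per the kernel-parameters.txt change above, full dynticks CPUs must also be RCU callback-offloaded (rcu_nocbs=) CPUs.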
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 369b576..a76e090 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -158,11 +158,21 @@ int tick_nohz_full_cpu(int cpu)
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
+ int cpu;
+
alloc_bootmem_cpumask_var(&nohz_full_mask);
- if (cpulist_parse(str, nohz_full_mask) < 0)
+ if (cpulist_parse(str, nohz_full_mask) < 0) {
pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
- else
- have_nohz_full_mask = true;
+ return 1;
+ }
+
+ cpu = smp_processor_id();
+ if (cpumask_test_cpu(cpu, nohz_full_mask)) {
+ pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
+ cpumask_clear_cpu(cpu, nohz_full_mask);
+ }
+ have_nohz_full_mask = true;
+
return 1;
}
__setup("nohz_full=", tick_nohz_full_setup);
@@ -193,51 +203,46 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
*/
static char __initdata nohz_full_buf[NR_CPUS + 1];
-static int __init init_tick_nohz_full(void)
+static int tick_nohz_init_all(void)
{
- cpumask_var_t online_nohz;
- int cpu;
+ int err = -1;
- if (!have_nohz_full_mask)
- return 0;
+#ifdef CONFIG_NO_HZ_FULL_ALL
+ if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
+ pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
+ return err;
+ }
+ err = 0;
+ cpumask_setall(nohz_full_mask);
+ cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
+ have_nohz_full_mask = true;
+#endif
+ return err;
+}
- cpu_notifier(tick_nohz_cpu_down_callback, 0);
+void __init tick_nohz_init(void)
+{
+ int cpu;
- if (!zalloc_cpumask_var(&online_nohz, GFP_KERNEL)) {
- pr_warning("NO_HZ: Not enough memory to check full nohz mask\n");
- return -ENOMEM;
+ if (!have_nohz_full_mask) {
+ if (tick_nohz_init_all() < 0)
+ return;
}
- /*
- * CPUs can probably not be concurrently offlined on initcall time.
- * But we are paranoid, aren't we?
- */
- get_online_cpus();
+ cpu_notifier(tick_nohz_cpu_down_callback, 0);
- /* Ensure we keep a CPU outside the dynticks range for timekeeping */
- cpumask_and(online_nohz, cpu_online_mask, nohz_full_mask);
- if (cpumask_equal(online_nohz, cpu_online_mask)) {
- pr_warning("NO_HZ: Must keep at least one online CPU "
- "out of nohz_full range\n");
- /*
- * We know the current CPU doesn't have its tick stopped.
- * Let's use it for the timekeeping duty.
- */
- preempt_disable();
- cpu = smp_processor_id();
- pr_warning("NO_HZ: Clearing %d from nohz_full range\n", cpu);
- cpumask_clear_cpu(cpu, nohz_full_mask);
- preempt_enable();
+ /* Make sure full dynticks CPU are also RCU nocbs */
+ for_each_cpu(cpu, nohz_full_mask) {
+ if (!rcu_is_nocb_cpu(cpu)) {
+ pr_warning("NO_HZ: CPU %d is not RCU nocb: "
+ "cleared from nohz_full range", cpu);
+ cpumask_clear_cpu(cpu, nohz_full_mask);
+ }
}
- put_online_cpus();
- free_cpumask_var(online_nohz);
cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
-
- return 0;
}
-core_initcall(init_tick_nohz_full);
#else
#define have_nohz_full_mask (0)
#endif