From 9b9fb610f6800e0db46cccd8618dd7e609c9bb5a Mon Sep 17 00:00:00 2001
From: Hiroshi Shimamoto
Date: Tue, 10 Jan 2012 09:24:05 +0900
Subject: sched: Remove empty #ifdefs

Signed-off-by: Hiroshi Shimamoto
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/4F0B8525.8070901@ct.jp.nec.com
Signed-off-by: Ingo Molnar

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4dbfd04a..457c881 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7136,10 +7136,6 @@ void set_curr_task(int cpu, struct task_struct *p)
 
 #endif
 
-#ifdef CONFIG_RT_GROUP_SCHED
-#else /* !CONFIG_RT_GROUP_SCHED */
-#endif /* CONFIG_RT_GROUP_SCHED */
-
 #ifdef CONFIG_CGROUP_SCHED
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
@@ -7248,9 +7244,6 @@ void sched_move_task(struct task_struct *tsk)
 }
 #endif /* CONFIG_CGROUP_SCHED */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#endif
-
 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
 static unsigned long to_ratio(u64 period, u64 runtime)
 {
--
cgit v0.10.2

From 6db9dc150eabce7053c8df2a2146aa0d6748ec42 Mon Sep 17 00:00:00 2001
From: Fabio Estevam
Date: Tue, 10 Jan 2012 08:46:23 -0200
Subject: sched: Fix CONFIG_CGROUP_SCHED dependency

The dependency bug was pointed out by this build warning:

  warning: (SCHED_AUTOGROUP) selects CGROUP_SCHED which has unmet direct dependencies (CGROUPS && EXPERIMENTAL)

Signed-off-by: Fabio Estevam
Cc: a.p.zijlstra@chello.nl
Cc: Fabio Estevam
Link: http://lkml.kernel.org/r/1326192383-5113-1-git-send-email-festevam@gmail.com
Signed-off-by: Ingo Molnar

diff --git a/init/Kconfig b/init/Kconfig
index 82b6a4c..a34cd17 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -702,7 +702,6 @@ config CGROUP_PERF
 
 menuconfig CGROUP_SCHED
 	bool "Group CPU scheduler"
-	depends on EXPERIMENTAL
 	default n
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
--
cgit v0.10.2
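
The warning quoted above is kconfig's standard complaint when a "select"
forces on a symbol whose "depends on" clause is not met: select sets the
target symbol unconditionally and never evaluates that symbol's own
dependencies. A minimal sketch of the pitfall, using hypothetical symbols
FOO and BAR standing in for SCHED_AUTOGROUP and CGROUP_SCHED:

    config FOO
    	bool "Foo feature"
    	select BAR		# forces BAR=y without checking BAR's dependencies

    config BAR
    	bool "Bar feature"
    	depends on EXPERIMENTAL	# unmet whenever FOO=y and EXPERIMENTAL=n

Dropping the EXPERIMENTAL dependency from CGROUP_SCHED, as the patch does,
is the usual fix when the selecting symbol cannot be expected to satisfy it.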
From bced76aeaca03b45e3b4bdb868cada328e497847 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 11 Jan 2012 13:11:12 +0100
Subject: sched: Fix lockup by limiting load-balance retries on lock-break

Eric and David reported dead machines and traced it to commit
a195f004 ("sched: Fix load-balance lock-breaking"); it turns out
there is still a scenario where we can end up retrying forever.

Since there is no strict forward progress guarantee in the
load-balance iteration, we can get stuck retrying the same
task-set over and over.

Creating a forward progress guarantee with the existing structure is
somewhat non-trivial, so for now simply terminate the retry loop after
a few tries.

Reported-by: Eric Dumazet
Tested-by: Eric Dumazet
Reported-by: David Ahern
[ logic cleanup as suggested by Eric ]
Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Martin Schwidefsky
Cc: Frederic Weisbecker
Cc: Suresh Siddha
Link: http://lkml.kernel.org/r/1326297936.2442.157.camel@twins
Signed-off-by: Ingo Molnar

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8e42de9..84adb2d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3130,8 +3130,10 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 }
 
 #define LBF_ALL_PINNED	0x01
-#define LBF_NEED_BREAK	0x02
-#define LBF_ABORT	0x04
+#define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
+#define LBF_HAD_BREAK	0x04
+#define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
+#define LBF_ABORT	0x10
 
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
@@ -4508,7 +4510,9 @@ redo:
 		goto out_balanced;
 
 	if (lb_flags & LBF_NEED_BREAK) {
-		lb_flags &= ~LBF_NEED_BREAK;
+		lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
+		if (lb_flags & LBF_ABORT)
+			goto out_balanced;
 		goto redo;
 	}
 
--
cgit v0.10.2
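
The replacement for the simple ~LBF_NEED_BREAK clear is the interesting
part of this patch: the new flag layout turns bits 2-3 into a two-bit
break counter, and the += expression clears NEED_BREAK while bumping that
counter in a single operation, with the counter's overflow landing on
LBF_ABORT. A standalone userspace sketch of the arithmetic (the LBF_*
values come from the hunk above; the demo loop is hypothetical
scaffolding, not kernel code):

    #include <stdio.h>

    /* LBF_* values copied from the fair.c hunk above; everything else
     * in this userspace demo is scaffolding, not kernel code. */
    #define LBF_ALL_PINNED	0x01
    #define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
    #define LBF_HAD_BREAK	0x04
    #define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
    #define LBF_ABORT		0x10

    int main(void)
    {
    	unsigned int lb_flags = 0;
    	int pass;

    	/* Pretend the balancer requests a lock-break on every pass. */
    	for (pass = 1; pass <= 5; pass++) {
    		lb_flags |= LBF_NEED_BREAK;

    		/*
    		 * The one-liner from the patch: adding
    		 * (LBF_HAD_BREAK - LBF_NEED_BREAK) clears the 0x02 bit
    		 * and increments the two-bit counter held in bits 2-3
    		 * (mask LBF_HAD_BREAKS). The fourth increment carries
    		 * out of that field into bit 4, which is exactly
    		 * LBF_ABORT, bounding the redo loop at three retries.
    		 */
    		lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;

    		printf("pass %d: lb_flags=0x%02x%s\n", pass, lb_flags,
    		       (lb_flags & LBF_ABORT) ? " (abort)" : "");

    		if (lb_flags & LBF_ABORT)
    			break;
    	}
    	return 0;
    }

Compiled and run, this prints lb_flags stepping through 0x04, 0x08, 0x0c
and then 0x10: the fourth consecutive lock-break sets LBF_ABORT and the
load balancer bails out to out_balanced instead of looping forever.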