author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-09-25 17:03:56 (GMT)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-09-25 17:03:56 (GMT)
commit     593d1006cdf710ab3469c0c37c184fea0bc3da97 (patch)
tree       e4db58440018a52089e8d6b39160f753ab10df99 /kernel/sched/core.c
parent     5217192b85480353aeeb395574e60d0db04f3676 (diff)
parent     9b20aa63b8fc9a6a3b6831f4eae3621755e51211 (diff)
Merge remote-tracking branch 'tip/core/rcu' into next.2012.09.25b
Resolved conflict in kernel/sched/core.c using Peter Zijlstra's approach from https://lkml.org/lkml/2012/9/5/585.
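
For orientation: the conflicting side of this merge wrapped the CPU_DEAD accounting in local_irq_save() plus a raw_spin_lock on the destination CPU's runqueue, while the resolution calls calc_load_migrate() on the dead CPU's runqueue directly. A minimal sketch of the post-merge case, reconstructed from the migration_call() hunk below (the surrounding labels and the rq variable are assumed unchanged):

	case CPU_DEAD:
		calc_load_migrate(rq);	/* fold the dead CPU's load-average contribution */
		break;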
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  |  52
1 file changed, 2 insertions(+), 50 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8c38b5e..1a48cdb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5342,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5610,15 +5607,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_DEAD:
-		{
-			struct rq *dest_rq;
-
-			local_irq_save(flags);
-			dest_rq = cpu_rq(smp_processor_id());
-			raw_spin_lock(&dest_rq->lock);
-			calc_load_migrate(rq);
-			raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
-		}
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
@@ -6027,11 +6016,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6045,40 +6029,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-
+	if (sd)
 		id = cpumask_first(sched_domain_span(sd));
-	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
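
The comment retained in the third hunk points at cpus_share_cache(), which consumes the per-cpu sd_llc_id cached here. A minimal sketch of that consumer, assuming its body matches the mainline kernel/sched/core.c of this era:

	bool cpus_share_cache(int this_cpu, int that_cpu)
	{
		/* CPUs share a cache iff update_top_cache_domain() cached the same LLC id for both. */
		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
	}

Because the id is simply the first CPU of the highest SD_SHARE_PKG_RESOURCES domain, the check is a constant-time comparison with no domain-tree walking.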