From 6956dc568f34107f1d02b24f87efe7250803fc87 Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Fri, 20 Jul 2012 14:19:50 +0800
Subject: sched/numa: Add SD_PREFER_SIBLING to the CPU domain

Commit 8e7fbcbc22c ("sched: Remove stale power aware scheduling remnants
and dysfunctional knobs") removed SD_PREFER_SIBLING from the CPU domain.

On NUMA machines this makes load_balance() stop preferring logical CPUs
(LCPUs) in the same physical package, which caused measurable performance
regressions on our NUMA machines, from Core2 through NHM and SNB.

Adding the domain flag back recovers the lost performance. The change has
no negative impact on any of my benchmarks (specjbb, kbuild, fio,
hackbench, etc.) on any of my machines.

Signed-off-by: Alex Shi
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1342765190-21540-1-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar

diff --git a/include/linux/topology.h b/include/linux/topology.h
index e91cd43..fec12d6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
 				| 0*SD_SHARE_CPUPOWER		\
 				| 0*SD_SHARE_PKG_RESOURCES	\
 				| 0*SD_SERIALIZE		\
+				| 1*SD_PREFER_SIBLING		\
 				,				\
 	.last_balance		= jiffies,			\
 	.balance_interval	= 1,				\
--
cgit v0.10.2

From 45afb1734fa6323a8ba08bd6c392ee227df67dde Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Sat, 7 Jul 2012 16:49:02 +0900
Subject: sched: Use task_rq_unlock() in __sched_setscheduler()

There seems to be no specific reason to open-code the unlock sequence
here; I guess commit 0122ec5b02f76 ("sched: Add p->pi_lock to
task_rq_lock()") simply missed it. Use task_rq_unlock() to stay
consistent with the other call sites.

Signed-off-by: Namhyung Kim
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1341647342-6742-1-git-send-email-namhyung@kernel.org
Signed-off-by: Ingo Molnar

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5d011ef..2cb4e77 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4340,9 +4340,7 @@ recheck:
 	 */
 	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		return 0;
 	}
--
cgit v0.10.2

From 014acbf0d5c8445e0ff88ae60edd676dd9cc461c Mon Sep 17 00:00:00 2001
From: Ying Xue
Date: Thu, 12 Jul 2012 15:03:42 +0800
Subject: sched: Fix minor code style issues

Delete redundant spaces between type names and variable names or
operators.

Signed-off-by: Ying Xue
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1342076622-6606-1-git-send-email-ying.xue0@gmail.com
Signed-off-by: Ingo Molnar

diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586f..23aa789 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
 {
-	int             idx      = 0;
-	int             task_pri = convert_prio(p->prio);
+	int idx = 0;
+	int task_pri = convert_prio(p->prio);
 
 	if (task_pri >= MAX_RT_PRIO)
 		return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  */
 void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
-	int             *currpri = &cp->cpu_to_pri[cpu];
-	int              oldpri  = *currpri;
-	int              do_mb = 0;
+	int *currpri = &cp->cpu_to_pri[cpu];
+	int oldpri = *currpri;
+	int do_mb = 0;
 
 	newpri = convert_prio(newpri);
--
cgit v0.10.2
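A side note on the 1*SD_PREFER_SIBLING line in the topology.h hunk above:
the kernel's topology initializers multiply every flag by 0 or 1 so that
disabled flags remain visible in the list. A minimal userspace sketch of
the idiom, with illustrative flag values (the real SD_* constants live in
include/linux/sched.h and may differ):

#include <stdio.h>

/* Illustrative stand-ins; not the kernel's actual SD_* values. */
#define SD_LOAD_BALANCE    0x0001
#define SD_BALANCE_NEWIDLE 0x0002
#define SD_SHARE_CPUPOWER  0x0080
#define SD_SERIALIZE       0x0400
#define SD_PREFER_SIBLING  0x1000

int main(void)
{
    /* Multiplying each flag by 0 or 1 keeps disabled flags listed
     * explicitly: enabling one is a one-character change, as in the
     * patch above. */
    unsigned int flags = 1*SD_LOAD_BALANCE
                       | 1*SD_BALANCE_NEWIDLE
                       | 0*SD_SHARE_CPUPOWER
                       | 0*SD_SERIALIZE
                       | 1*SD_PREFER_SIBLING;

    printf("CPU domain flags: %#06x\n", flags);
    printf("prefer sibling:   %s\n",
           (flags & SD_PREFER_SIBLING) ? "yes" : "no");
    return 0;
}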
From a7e4786b937a3ae918a7520cfdba557a80915fa7 Mon Sep 17 00:00:00 2001
From: "Srivatsa S. Bhat"
Date: Sat, 21 Jul 2012 00:54:59 +0530
Subject: sched: Fix comment about PREEMPT_ACTIVE bit location

The PREEMPT_ACTIVE flag is bit 27, not bit 28. Fix the comment.

Signed-off-by: Srivatsa S. Bhat
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: rostedt@goodmis.org
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/20120720192459.6149.14821.stgit@srivatsabhat.in.ibm.com
Signed-off-by: Ingo Molnar

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f309..305f23c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -22,7 +22,7 @@
  *
  * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
  * - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
--
cgit v0.10.2
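The corrected comment can be sanity-checked in a few lines of userspace C.
The masks below are reconstructed from the surrounding hardirq.h comment
(preempt count in bits 0-7, softirq count in bits 8-15, hardirq count in
bits 16-25, NMI in bit 26), so treat this as a sketch rather than the
kernel's authoritative definitions:

#include <stdio.h>

/* Masks as described by the hardirq.h comment above. */
#define PREEMPT_MASK 0x000000ffU /* bits  0-7  */
#define SOFTIRQ_MASK 0x0000ff00U /* bits  8-15 */
#define HARDIRQ_MASK 0x03ff0000U /* bits 16-25 */
#define NMI_MASK     0x04000000U /* bit  26    */

int main(void)
{
    /* The first free bit above NMI_MASK is bit 27, which is where
     * PREEMPT_ACTIVE lands; hence the comment fix (not bit 28). */
    unsigned int preempt_active = NMI_MASK << 1;

    printf("PREEMPT_ACTIVE = %#010x (bit %d)\n",
           preempt_active, __builtin_ctz(preempt_active));
    return 0;
}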
From b9403130a5350fca59a50ed11c198cb8c7e54119 Mon Sep 17 00:00:00 2001
From: Michael Wang
Date: Thu, 12 Jul 2012 16:10:13 +0800
Subject: sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'

With this patch, struct lb_env holds a pointer to the load-balancing
cpumask, so we no longer need to pass a cpumask around between the
load-balancing helpers.

Signed-off-by: Michael Wang
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/4FFE8665.3080705@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db..d0cc03b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-				      const struct cpumask *cpus,
-				      int *balance, struct sd_lb_stats *sds)
+				      int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 		local_group = cpumask_test_cpu(env->dst_cpu,
 					       sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  *		put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_grpmask    = sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
--
cgit v0.10.2
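The refactoring pattern in this last patch, gathering state that every
load-balancing helper needs into one environment struct and passing a
single pointer, is worth a stand-alone illustration. All names in this
sketch are invented stand-ins, not the kernel's real types or API:

#include <stdio.h>

#define NCPUS 8

/* Stand-in for the kernel's struct cpumask. */
struct cpumask_stub { unsigned long bits; };

/* Stand-in for struct lb_env: shared load-balancing state, now
 * including a pointer to the mask of CPUs under consideration. */
struct lb_env_stub {
    int dst_cpu;
    struct cpumask_stub *cpus;
};

/* Helpers reach the mask through env->cpus instead of taking an extra
 * cpumask parameter, mirroring the changed signatures of
 * find_busiest_group() and find_busiest_queue() above. */
static int count_candidate_cpus(const struct lb_env_stub *env)
{
    int i, n = 0;

    for (i = 0; i < NCPUS; i++)
        if (env->cpus->bits & (1UL << i))
            n++;
    return n;
}

int main(void)
{
    struct cpumask_stub active = { .bits = 0xb5 }; /* CPUs 0,2,4,5,7 */
    struct lb_env_stub env = { .dst_cpu = 0, .cpus = &active };

    printf("candidate CPUs: %d\n", count_candidate_cpus(&env));
    return 0;
}

In load_balance() itself the same thing happens: the mask is stored once
via the .cpus = cpus initializer in struct lb_env, after which every
helper reads env->cpus rather than threading the mask through each call.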