author | Peter Zijlstra <peterz@infradead.org> | 2008-11-11 10:52:33 (GMT)
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-11 10:57:22 (GMT)
commit | 2002c69595a092518107f7e3c1294c9710bc92ae (patch)
tree | 5935add4d4cd426b5b824474b7ec2ffea48d2951 /kernel
parent | ad474caca3e2a0550b7ce0706527ad5ab389a4d4 (diff)
download | linux-fsl-qoriq-2002c69595a092518107f7e3c1294c9710bc92ae.tar.xz
sched: release buddies on yield
Clear buddies on yield, so that the buddy rules don't schedule them
despite them being placed right-most.
This fixed a performance regression with yield-happy Java workloads.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Tested-by: Lin Ming <ming.m.lin@intel.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_fair.c | 17 |
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 51aa3e1..98345e4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -716,6 +716,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -738,11 +747,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
-	if (cfs_rq->last == se)
-		cfs_rq->last = NULL;
-
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
+	clear_buddies(cfs_rq, se);
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
@@ -977,6 +982,8 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
+	clear_buddies(cfs_rq, se);
+
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		update_rq_clock(rq);
 		/*
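
For context: the buddies matter here because pick_next_entity() prefers cfs_rq->next and cfs_rq->last over the fair (leftmost) pick, so a yielding task that is still marked as a buddy can be chosen again immediately even after yield_task_fair() pushed it right-most. Below is a minimal, self-contained sketch of that interaction, not the kernel source: close_enough() and its vruntime threshold, the leftmost field, and the example values in main() are simplified stand-ins for the real wakeup_preempt_entity() and rbtree logic of this era.

/*
 * Sketch (assumption-labelled, not kernel code): shows why a stale buddy
 * pointer lets a yielding task be re-picked, and how clear_buddies()
 * from this patch breaks that preference.
 */
#include <stddef.h>

struct sched_entity { long vruntime; };

struct cfs_rq {
	struct sched_entity *curr;
	struct sched_entity *next;	/* "next" buddy: preferred wakee */
	struct sched_entity *last;	/* "last" buddy: preferred waker */
	struct sched_entity *leftmost;	/* stand-in for the rbtree leftmost entity */
};

/* Stand-in for wakeup_preempt_entity(): 0 means "close enough in
 * vruntime to keep preferring the buddy over the leftmost pick". */
static int close_enough(struct sched_entity *buddy, struct sched_entity *se)
{
	return (buddy->vruntime - se->vruntime < 1000000) ? 0 : 1;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = cfs_rq->leftmost;

	/* Buddy preference can override the fair (leftmost) choice. */
	if (cfs_rq->next && close_enough(cfs_rq->next, se) < 1)
		return cfs_rq->next;
	if (cfs_rq->last && close_enough(cfs_rq->last, se) < 1)
		return cfs_rq->last;

	return se;
}

/* The helper added by this patch: drop the entity's buddy marks. */
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

int main(void)
{
	struct sched_entity yielder = { 0 }, other = { 10 };
	struct cfs_rq rq = { &yielder, &yielder, NULL, &other };

	/* Without the patch: the yielding task is re-picked via its buddy mark. */
	struct sched_entity *picked = pick_next_entity(&rq);	/* == &yielder */

	/* With the patch: yield_task_fair() clears the buddies first. */
	clear_buddies(&rq, &yielder);
	picked = pick_next_entity(&rq);				/* == &other */
	(void)picked;
	return 0;
}

This mirrors the structure of the patch: dequeue_entity() already had to forget a departing buddy, and the same clearing is now factored into clear_buddies() and reused on yield so the yielding entity loses its preferential treatment.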