summary | refs | log | tree | commit | diff
path: root/kernel/sched_rt.c
diff options
context:
space:
mode:
author: Gregory Haskins <ghaskins@novell.com> 2008-01-25 20:08:23 (GMT)
committer: Ingo Molnar <mingo@elte.hu> 2008-01-25 20:08:23 (GMT)
commit: cdc8eb984ce47a7c90a049f45229f7b0d59ba781 (patch)
tree: cebcaf9d854b843a5381cae18cffa6fd836a20ab /kernel/sched_rt.c
parent: cb46984504048db946cd551c261df4e70d59a8ea (diff)
download: linux-cdc8eb984ce47a7c90a049f45229f7b0d59ba781.tar.xz
sched: RT-balance, only adjust overload state when changing
The overload set/clears were originally idempotent when this logic was first implemented. But that is no longer true due to the addition of the atomic counter, and this logic was never updated to work properly with that change. So only adjust the overload state if it is actually changing, to avoid getting out of sync.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 57fa3d9..a386758 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -34,9 +34,11 @@ static inline void rt_clear_overload(struct rq *rq)
static void update_rt_migration(struct rq *rq)
{
if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
- rt_set_overload(rq);
- rq->rt.overloaded = 1;
- } else {
+ if (!rq->rt.overloaded) {
+ rt_set_overload(rq);
+ rq->rt.overloaded = 1;
+ }
+ } else if (rq->rt.overloaded) {
rt_clear_overload(rq);
rq->rt.overloaded = 0;
}