-rw-r--r--  kernel/sched/core.c      | 26
-rw-r--r--  kernel/sched/deadline.c  | 15
-rw-r--r--  kernel/sched/fair.c      | 26
-rw-r--r--  kernel/sched/idle_task.c | 12
-rw-r--r--  kernel/sched/rt.c        | 16
-rw-r--r--  kernel/sched/sched.h     |  1
6 files changed, 44 insertions(+), 52 deletions(-)
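
In short, this patch deletes the per-class pre_schedule() hook: each class now pulls tasks at the top of its own pick_next_task(), and idle balancing moves from __schedule() into pick_next_task_fair(). Reconstructed from the hunks removed below (a sketch, not a verbatim copy of __schedule()):

	pre_schedule(rq, prev);			/* class hook: pull dl/rt work */

	if (unlikely(!rq->nr_running)) {
		/* stamp before balancing so its cost is accounted as idle */
		rq->idle_stamp = rq_clock(rq);
		if (idle_balance(rq))		/* may pull fair tasks over */
			rq->idle_stamp = 0;
	}

	next = pick_next_task(rq, prev);	/* where all of this now lives */
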
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dedb5f0..3068f37 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2169,13 +2169,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
#ifdef CONFIG_SMP
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
- if (prev->sched_class->pre_schedule)
- prev->sched_class->pre_schedule(rq, prev);
-}
-
/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
@@ -2193,10 +2186,6 @@ static inline void post_schedule(struct rq *rq)
#else
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
static inline void post_schedule(struct rq *rq)
{
}
@@ -2592,7 +2581,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
* Optimization: we know that if all tasks are in
* the fair class we can call that function directly:
*/
- if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+ if (likely(prev->sched_class == &fair_sched_class &&
+ rq->nr_running == rq->cfs.h_nr_running)) {
p = fair_sched_class.pick_next_task(rq, prev);
if (likely(p))
return p;
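
The added prev->sched_class test matters because pulling now happens inside the class pickers: if prev were a -deadline or RT task, taking the fair fast path would skip its class's chance to pull. For context, pick_next_task() after this change reads roughly as follows (the for_each_class() fallback is pre-existing code, abbreviated from memory):

static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Fast path only when prev itself is fair; a dl/rt prev must go
	 * through its own class's pick_next_task() to get a chance to
	 * pull tasks from other runqueues.
	 */
	if (likely(prev->sched_class == &fair_sched_class &&
		   rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq, prev);
		if (likely(p))
			return p;
	}

	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p)
			return p;
	}

	BUG(); /* the idle class should always have a runnable task */
}
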
@@ -2695,18 +2685,6 @@ need_resched:
switch_count = &prev->nvcsw;
}
- pre_schedule(rq, prev);
-
- if (unlikely(!rq->nr_running)) {
- /*
- * We must set idle_stamp _before_ calling idle_balance(), such
- * that we measure the duration of idle_balance() as idle time.
- */
- rq->idle_stamp = rq_clock(rq);
- if (idle_balance(rq))
- rq->idle_stamp = 0;
- }
-
if (prev->on_rq || rq->skip_clock_update < 0)
update_rq_clock(rq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 50797d5..ed31ef6 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -944,6 +944,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
resched_task(rq->curr);
}
+static int pull_dl_task(struct rq *this_rq);
+
#endif /* CONFIG_SMP */
/*
@@ -998,6 +1000,11 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
dl_rq = &rq->dl;
+#ifdef CONFIG_SMP
+ if (dl_task(prev))
+ pull_dl_task(rq);
+#endif
+
if (unlikely(!dl_rq->dl_nr_running))
return NULL;
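
Pulling before the dl_nr_running test is deliberate: pull_dl_task() may enqueue an earlier-deadline task from another CPU, and both the emptiness check and the pick must see its result. Annotated (same code as the hunk above; the guard mirrors the pre_schedule_dl() removed below):

#ifdef CONFIG_SMP
	/*
	 * Only if prev was a -deadline task can descheduling it make this
	 * rq's earliest deadline later, i.e. make a pull worthwhile.
	 */
	if (dl_task(prev))
		pull_dl_task(rq);	/* may enqueue: run before the
					 * dl_nr_running check */
#endif
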
@@ -1429,13 +1436,6 @@ skip:
return ret;
}
-static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
-{
- /* Try to pull other tasks here */
- if (dl_task(prev))
- pull_dl_task(rq);
-}
-
static void post_schedule_dl(struct rq *rq)
{
push_dl_tasks(rq);
@@ -1628,7 +1628,6 @@ const struct sched_class dl_sched_class = {
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
- .pre_schedule = pre_schedule_dl,
.post_schedule = post_schedule_dl,
.task_woken = task_woken_dl,
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a81b241..43b49fe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2577,7 +2577,8 @@ void idle_exit_fair(struct rq *this_rq)
update_rq_runnable_avg(this_rq, 0);
}
-#else
+#else /* CONFIG_SMP */
+
static inline void update_entity_load_avg(struct sched_entity *se,
int update_cfs_rq) {}
static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
@@ -2589,7 +2590,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
int sleep) {}
static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
int force_update) {}
-#endif
+#endif /* CONFIG_SMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -4682,9 +4683,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
struct sched_entity *se;
struct task_struct *p;
+again: __maybe_unused
#ifdef CONFIG_FAIR_GROUP_SCHED
if (!cfs_rq->nr_running)
- return NULL;
+ goto idle;
if (!prev || prev->sched_class != &fair_sched_class)
goto simple;
@@ -4760,7 +4762,7 @@ simple:
#endif
if (!cfs_rq->nr_running)
- return NULL;
+ goto idle;
if (prev)
prev->sched_class->put_prev_task(rq, prev);
@@ -4777,6 +4779,22 @@ simple:
hrtick_start_fair(rq, p);
return p;
+
+idle:
+#ifdef CONFIG_SMP
+ idle_enter_fair(rq);
+ /*
+ * We must set idle_stamp _before_ calling idle_balance(), such that we
+ * measure the duration of idle_balance() as idle time.
+ */
+ rq->idle_stamp = rq_clock(rq);
+ if (idle_balance(rq)) { /* drops rq->lock */
+ rq->idle_stamp = 0;
+ goto again;
+ }
+#endif
+
+ return NULL;
}
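
The goto again is the subtle part of this hunk: idle_balance() drops rq->lock while it walks the sched domains, so anything it pulls (and anything a remote CPU enqueues meanwhile) forces the pick to be redone from the top. Same code as above, with the consequences spelled out:

idle:
#ifdef CONFIG_SMP
	idle_enter_fair(rq);		/* account this rq as going idle */
	/*
	 * We must set idle_stamp _before_ calling idle_balance(), such that
	 * we measure the duration of idle_balance() as idle time.
	 */
	rq->idle_stamp = rq_clock(rq);
	if (idle_balance(rq)) {		/* drops and retakes rq->lock */
		rq->idle_stamp = 0;	/* we found work: not idle after all */
		goto again;		/* rq changed under us: redo the pick */
	}
#endif

	return NULL;			/* truly nothing to run: go idle */
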
/*
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 721371b..f7d03af 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -13,13 +13,8 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
-
-static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
-{
- idle_exit_fair(rq);
- rq_last_tick_reset(rq);
-}
#endif /* CONFIG_SMP */
+
/*
* Idle tasks are unconditionally rescheduled:
*/
@@ -56,6 +51,10 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
+#ifdef CONFIG_SMP
+ idle_exit_fair(rq);
+ rq_last_tick_reset(rq);
+#endif
}
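
The move from pre_schedule_idle() to put_prev_task_idle() preserves the ordering: both run under rq->lock, exactly when the idle task is being switched away from. The resulting function, annotated:

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	idle_exit_fair(rq);	/* stop folding this rq's idle period into
				 * the runnable load averages */
	rq_last_tick_reset(rq);	/* NO_HZ_FULL bookkeeping on leaving idle */
#endif
}
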
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
@@ -99,7 +98,6 @@ const struct sched_class idle_sched_class = {
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_idle,
- .pre_schedule = pre_schedule_idle,
#endif
.set_curr_task = set_curr_task_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a15ca1c..72f9ec7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -229,6 +229,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
#ifdef CONFIG_SMP
+static int pull_rt_task(struct rq *this_rq);
+
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
@@ -1330,6 +1332,12 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
struct task_struct *p;
struct rt_rq *rt_rq = &rq->rt;
+#ifdef CONFIG_SMP
+ /* Try to pull RT tasks here if we lower this rq's prio */
+ if (rq->rt.highest_prio.curr > prev->prio)
+ pull_rt_task(rq);
+#endif
+
if (!rt_rq->rt_nr_running)
return NULL;
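
Same pattern as deadline.c, with a priority test as the guard (lower prio value = higher priority). Annotated; the condition is lifted verbatim from the pre_schedule_rt() removed below:

#ifdef CONFIG_SMP
	/*
	 * Every RT task still queued here is lower priority than prev was,
	 * so switching away from prev lowers this rq's priority; check the
	 * overloaded runqueues for an RT task we should run instead.
	 */
	if (rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
#endif
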
@@ -1721,13 +1729,6 @@ skip:
return ret;
}
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
- /* Try to pull RT tasks here if we lower this rq's prio */
- if (rq->rt.highest_prio.curr > prev->prio)
- pull_rt_task(rq);
-}
-
static void post_schedule_rt(struct rq *rq)
{
push_rt_tasks(rq);
@@ -2004,7 +2005,6 @@ const struct sched_class rt_sched_class = {
.set_cpus_allowed = set_cpus_allowed_rt,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
- .pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c534cf4..1bf34c2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1118,7 +1118,6 @@ struct sched_class {
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
- void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_waking) (struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
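
Net effect on the interface: pre-pick work is no longer a hook but a convention inside ->pick_next_task(). Generalized sketch (illustrative only; worth_pulling(), pull_tasks(), has_queued_tasks() and pick_highest() are hypothetical stand-ins for the per-class logic in the hunks above):

static struct task_struct *
pick_next_task_example(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	if (worth_pulling(rq, prev))	/* e.g. dl_task(prev), or a prio test */
		pull_tasks(rq);		/* may drop and retake rq->lock */
#endif
	if (!has_queued_tasks(rq))	/* re-check after any pull */
		return NULL;

	put_prev_task(rq, prev);	/* classes retire prev themselves now */
	return pick_highest(rq);
}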