Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	60
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c8c8ac1..5ef3a68 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -540,6 +540,37 @@ void resched_task(struct task_struct *p)
 		smp_send_reschedule(cpu);
 }
 
+#ifdef CONFIG_PREEMPT_LAZY
+void resched_task_lazy(struct task_struct *p)
+{
+	int cpu;
+
+	if (!sched_feat(PREEMPT_LAZY)) {
+		resched_task(p);
+		return;
+	}
+
+	assert_raw_spin_locked(&task_rq(p)->lock);
+
+	if (test_tsk_need_resched(p))
+		return;
+
+	if (test_tsk_need_resched_lazy(p))
+		return;
+
+	set_tsk_need_resched_lazy(p);
+
+	cpu = task_cpu(p);
+	if (cpu == smp_processor_id())
+		return;
+
+	/* NEED_RESCHED_LAZY must be visible before we test polling */
+	smp_mb();
+	if (!tsk_is_polling(p))
+		smp_send_reschedule(cpu);
+}
+#endif
+
 void resched_cpu(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
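The test/set/clear_tsk_need_resched_lazy() helpers used above are not defined in this file. A minimal sketch of what they plausibly look like, assuming the rest of the series adds a TIF_NEED_RESCHED_LAZY thread flag and mirrors the existing TIF_NEED_RESCHED helpers in include/linux/sched.h:

static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
{
	/* TIF_NEED_RESCHED_LAZY is assumed here; mirrors set_tsk_need_resched() */
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}

static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}

static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY));
}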
@@ -704,6 +735,17 @@ void resched_task(struct task_struct *p)
 	assert_raw_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
 }
+#ifdef CONFIG_PREEMPT_LAZY
+void resched_task_lazy(struct task_struct *p)
+{
+	if (!sched_feat(PREEMPT_LAZY)) {
+		resched_task(p);
+		return;
+	}
+	assert_raw_spin_locked(&task_rq(p)->lock);
+	set_tsk_need_resched_lazy(p);
+}
+#endif
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
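Both variants of resched_task_lazy() are gated on sched_feat(PREEMPT_LAZY), which is also not defined in this file. Presumably the series adds a feature bit along these lines to kernel/sched/features.h, making lazy preemption toggleable at runtime through the sched_features debugfs knob (a sketch, not part of this diff):

/* Defer non-RT preemption requests to the next natural preemption point */
SCHED_FEAT(PREEMPT_LAZY, true)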
@@ -1736,6 +1778,9 @@ void sched_fork(struct task_struct *p)
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+	task_thread_info(p)->preempt_lazy_count = 0;
+#endif
 #ifdef CONFIG_SMP
 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
 #endif
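preempt_lazy_count is a new per-thread counter; CONFIG_HAVE_PREEMPT_LAZY is evidently meant to be selected by architectures whose struct thread_info carries that field. The accessors around it are not part of this file either; a sketch of the assumed shape, with illustrative names:

#ifdef CONFIG_HAVE_PREEMPT_LAZY
# define preempt_lazy_count()		(current_thread_info()->preempt_lazy_count)
# define inc_preempt_lazy_count()	(current_thread_info()->preempt_lazy_count++)
# define dec_preempt_lazy_count()	(current_thread_info()->preempt_lazy_count--)
#else
# define preempt_lazy_count()		0
# define inc_preempt_lazy_count()	do { } while (0)
# define dec_preempt_lazy_count()	do { } while (0)
#endif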
@@ -2388,6 +2433,7 @@ void migrate_disable(void)
 	}
 
 	preempt_disable();
+	preempt_lazy_disable();
 	pin_current_cpu();
 	p->migrate_disable = 1;
 	preempt_enable();
@@ -2442,6 +2488,7 @@ void migrate_enable(void)
 
 	unpin_current_cpu();
 	preempt_enable();
+	preempt_lazy_enable();
 }
 EXPORT_SYMBOL(migrate_enable);
 #else
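migrate_disable()/migrate_enable() now bracket their critical section with preempt_lazy_disable()/preempt_lazy_enable(), whose definitions live in the preempt.h side of the series. Assuming they simply maintain the per-thread lazy counter around compiler barriers, a sketch could look like this:

#ifdef CONFIG_PREEMPT_LAZY
# define preempt_lazy_disable()				\
do {							\
	inc_preempt_lazy_count();			\
	barrier();					\
} while (0)

# define preempt_lazy_enable()				\
do {							\
	barrier();					\
	dec_preempt_lazy_count();			\
	/* the real macro presumably also re-checks need_resched here */ \
	preempt_check_resched();			\
} while (0)
#else
# define preempt_lazy_disable()		barrier()
# define preempt_lazy_enable()		barrier()
#endif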
@@ -2569,6 +2616,7 @@ need_resched:
 	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
 	clear_tsk_need_resched(prev);
+	clear_tsk_need_resched_lazy(prev);
 	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
@@ -2673,6 +2721,14 @@ asmlinkage void __sched notrace preempt_schedule(void)
 	if (likely(!preemptible()))
 		return;
 
+#ifdef CONFIG_PREEMPT_LAZY
+	/*
+	 * Check for lazy preemption
+	 */
+	if (current_thread_info()->preempt_lazy_count &&
+	    !test_thread_flag(TIF_NEED_RESCHED))
+		return;
+#endif
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
 		/*
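The early return added to preempt_schedule() encodes the core rule: while the current task sits inside a lazy-preempt-disabled region, a pending lazy reschedule alone does not force an immediate context switch; only a hard TIF_NEED_RESCHED does. Restated as a predicate purely for illustration (not part of the patch, the name is made up):

/*
 * True when preemption may be deferred: we are inside a
 * preempt_lazy_disable()'d region and no hard TIF_NEED_RESCHED is
 * pending, so any NEED_RESCHED_LAZY request is left to be honoured at
 * the next natural preemption point.
 */
static inline bool preempt_may_defer_lazy(void)
{
	return current_thread_info()->preempt_lazy_count &&
	       !test_thread_flag(TIF_NEED_RESCHED);
}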
@@ -4417,7 +4473,9 @@ void init_idle(struct task_struct *idle, int cpu)
 
 	/* Set the preempt count _outside_ the spinlocks! */
 	task_thread_info(idle)->preempt_count = 0;
-
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+	task_thread_info(idle)->preempt_lazy_count = 0;
+#endif
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
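Taken together, the call-site impact is small: a scheduling path that previously forced an immediate reschedule only needs a one-line change to request a lazy one instead. The actual call-site conversions (e.g. in kernel/sched/fair.c) are in separate patches of the series; purely for illustration, with a made-up function name:

static void check_preempt_example(struct task_struct *curr)
{
	/*
	 * Was: resched_task(curr).  The lazy variant sets
	 * NEED_RESCHED_LAZY, which is acted upon at the next natural
	 * preemption point instead of forcing an immediate switch.
	 */
	resched_task_lazy(curr);
}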