From 3472eaa1f12e217e2b8b0ef658ff861b2308cbbd Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Sun, 21 Sep 2014 21:33:38 +0200
Subject: sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock,
 use task_rq_lock()

1. read_lock(tasklist_lock) does not need to disable irqs.

2. ->mm != NULL is a common mistake, use PF_KTHREAD.

3. The second ->mm check can be simply removed.

4. task_rq_lock() looks better than raw_spin_lock(&p->pi_lock) +
   __task_rq_lock().

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Kirill Tkhai
Cc: Mike Galbraith
Cc: Linus Torvalds
Link: http://lkml.kernel.org/r/20140921193338.GA28621@redhat.com
Signed-off-by: Ingo Molnar

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0abfb7e..d65566d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7220,12 +7220,12 @@ void normalize_rt_tasks(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	read_lock_irqsave(&tasklist_lock, flags);
+	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/*
 		 * Only normalize user tasks:
 		 */
-		if (!p->mm)
+		if (p->flags & PF_KTHREAD)
 			continue;
 
 		p->se.exec_start		= 0;
@@ -7240,20 +7240,16 @@ void normalize_rt_tasks(void)
 			 * Renice negative nice level userspace
 			 * tasks back to 0:
 			 */
-			if (task_nice(p) < 0 && p->mm)
+			if (task_nice(p) < 0)
 				set_user_nice(p, 0);
 			continue;
 		}
 
-		raw_spin_lock(&p->pi_lock);
-		rq = __task_rq_lock(p);
-
+		rq = task_rq_lock(p, &flags);
 		normalize_task(rq, p);
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock(&p->pi_lock);
+		task_rq_unlock(rq, p, &flags);
 	}
-	read_unlock_irqrestore(&tasklist_lock, flags);
+	read_unlock(&tasklist_lock);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
--
cgit v0.10.2
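
For reference, below is a hedged sketch of how normalize_rt_tasks() reads once the
hunks above are applied. It is reconstructed from this diff and the hunk headers
only; the declarations of g and p and the elided per-task resets between the two
hunks are assumptions from context, not part of the patch.

/*
 * Sketch of normalize_rt_tasks() after this patch; reconstructed from the
 * diff above, not copied from a tree.
 */
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;	/* assumed: implied by for_each_process_thread() */
	unsigned long flags;
	struct rq *rq;

	/* tasklist_lock is only taken for reading; irqs can stay enabled */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks: PF_KTHREAD identifies kernel
		 * threads reliably, unlike the old ->mm != NULL test (a
		 * kthread can temporarily borrow a user mm).
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		/* ... further per-entity resets elided, see the file ... */

		if (!rt_task(p)) {
			/* Renice negative nice level userspace tasks back to 0 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		/*
		 * task_rq_lock() takes p->pi_lock and the rq lock and saves
		 * irq flags itself, replacing the open-coded
		 * raw_spin_lock(&p->pi_lock) + __task_rq_lock(p) pair.
		 */
		rq = task_rq_lock(p, &flags);
		normalize_task(rq, p);
		task_rq_unlock(rq, p, &flags);
	}
	read_unlock(&tasklist_lock);
}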