author      Thomas Gleixner <tglx@linutronix.de>    2011-06-22 17:47:03 (GMT)
committer   Scott Wood <scottwood@freescale.com>    2014-05-14 18:38:39 (GMT)
commit      16f5ff622ad84d4fe2ff2a0b272775fc6d8018b6 (patch)
tree        0ce1a9d9b70eb22d58e99a789f510f7f4e67ce75 /kernel/workqueue.c
parent      9eee7e4d4299bfe58a70e640e4ae96a1f4c1a131 (diff)
download    linux-fsl-qoriq-16f5ff622ad84d4fe2ff2a0b272775fc6d8018b6.tar.xz
sched: Disentangle worker accounting from rqlock
The worker accounting for CPU-bound workers is plugged into the core scheduler code and the wakeup code. This is not a hard requirement and can be avoided by keeping track of the state in the workqueue code itself.

Keep track of the sleeping state in the worker itself and call the notifier before entering the core scheduler. There might be false positives when the task is woken between that call and actually scheduling, but that is not really different from being scheduled out and woken immediately after switching away. There is also no harm in updating nr_running when the task returns from scheduling instead of accounting it in the wakeup code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
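For context, the call ordering the changelog describes looks roughly like the sketch below. This is an illustration only: the wrapper function and its name are placeholders, and the scheduler-side changes are not part of the kernel/workqueue.c diff shown on this page.

    /*
     * Illustrative sketch of the intended ordering, not part of this diff.
     * A workqueue worker (PF_WQ_WORKER) tells the workqueue code that it is
     * about to sleep before entering the core scheduler, and that it is
     * running again after schedule() returns.  All nr_running accounting
     * therefore happens in workqueue context, outside the rq->lock
     * protected wakeup path.  sched_out_and_back() is a placeholder name,
     * not a real kernel function.
     */
    static void sched_out_and_back(struct task_struct *tsk)
    {
            if (tsk->flags & PF_WQ_WORKER)
                    wq_worker_sleeping(tsk);   /* may drop nr_running, takes pool->lock */

            __schedule();                      /* stands in for the actual context switch */

            if (tsk->flags & PF_WQ_WORKER)
                    wq_worker_running(tsk);    /* restores nr_running if the worker slept */
    }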
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   55
1 file changed, 23 insertions, 32 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b35f90e..9efb7ce 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -808,44 +808,31 @@ static void wake_up_worker(struct worker_pool *pool)
}
/**
- * wq_worker_waking_up - a worker is waking up
- * @task: task waking up
- * @cpu: CPU @task is waking up to
+ * wq_worker_running - a worker is running again
+ * @task: task returning from sleep
*
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
*/
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
{
struct worker *worker = kthread_data(task);
- if (!(worker->flags & WORKER_NOT_RUNNING)) {
- WARN_ON_ONCE(worker->pool->cpu != cpu);
+ if (!worker->sleeping)
+ return;
+ if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(&worker->pool->nr_running);
- }
+ worker->sleeping = 0;
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
- * @cpu: CPU in question, must be the current CPU number
- *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+ struct worker *next, *worker = kthread_data(task);
struct worker_pool *pool;
/*
@@ -854,14 +841,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
pool = worker->pool;
- /* this can only happen on the local cpu */
- if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
- return NULL;
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
+ worker->sleeping = 1;
+ spin_lock_irq(&pool->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
@@ -874,9 +862,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist))
- to_wakeup = first_worker(pool);
- return to_wakeup ? to_wakeup->task : NULL;
+ !list_empty(&pool->worklist)) {
+ next = first_worker(pool);
+ if (next)
+ wake_up_process(next->task);
+ }
+ spin_unlock_irq(&pool->lock);
}
/**
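Note: the hunks above test and set a worker->sleeping flag whose declaration is not visible in this diffstat-limited view. Assuming it is a plain integer flag added to struct worker elsewhere in the patch, the per-worker state used by these hooks looks roughly like this sketch (the real struct worker carries many more members):

    /*
     * Sketch only: the fields referenced by the hooks above, not the full
     * struct worker definition and not part of this diff.
     */
    struct worker_state_sketch {
            unsigned int flags;       /* WORKER_* flags, e.g. WORKER_NOT_RUNNING */
            int          sleeping;    /* set in wq_worker_sleeping(),
                                       * cleared in wq_worker_running() */
    };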