author		Tejun Heo <tj@kernel.org>	2013-04-01 18:23:35 (GMT)
committer	Tejun Heo <tj@kernel.org>	2013-04-01 18:23:35 (GMT)
commit		dce90d47c4288c7d3c1988bebb059ea7451d5fd5 (patch)
tree		636f36bd20d7aba953b28346f927be5cf33ea055 /kernel/workqueue.c
parent		1befcf3073fa083e7dc48c384ce06f3bd900f514 (diff)
workqueue: introduce put_pwq_unlocked()
Factor out the lock pool, put_pwq(), unlock sequence into put_pwq_unlocked(). The two existing places are converted and there will be more with NUMA affinity support.

This is to prepare for NUMA affinity support for unbound workqueues and doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
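For illustration only, here is a minimal user-space sketch of the pattern the patch introduces: a NULL-tolerant helper that wraps the lock, put, unlock sequence so each call site collapses to a single line. The names (struct obj, put_obj(), put_obj_unlocked()) are hypothetical stand-ins and not kernel code; the real put_pwq_unlocked() additionally relies on sched-RCU to keep the pool alive across the lock, which a plain pthread mutex does not model.

/*
 * Hypothetical sketch (not from the patch): factor a
 * lock/put/unlock sequence into a NULL-tolerant helper,
 * using a pthread mutex in place of the pool spinlock.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int refcnt;
};

/* Mirrors put_pwq(): must be called with @o->lock held. */
static void put_obj(struct obj *o)
{
	if (--o->refcnt == 0)
		printf("last ref dropped; release %p\n", (void *)o);
}

/* Mirrors put_pwq_unlocked(): wraps the locking and allows NULL @o. */
static void put_obj_unlocked(struct obj *o)
{
	if (o) {
		pthread_mutex_lock(&o->lock);
		put_obj(o);
		pthread_mutex_unlock(&o->lock);
	}
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .refcnt = 1 };

	put_obj_unlocked(NULL);	/* allowed, a no-op */
	put_obj_unlocked(&o);	/* drops the base ref */
	return 0;
}

Each converted call site in the diff below shrinks from five lines to one, which matters once NUMA affinity support multiplies the number of such sites.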
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3825c14..d9a4aeb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1057,6 +1057,25 @@ static void put_pwq(struct pool_workqueue *pwq)
 	schedule_work(&pwq->unbound_release_work);
 }
 
+/**
+ * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
+ * @pwq: pool_workqueue to put (can be %NULL)
+ *
+ * put_pwq() with locking.  This function also allows %NULL @pwq.
+ */
+static void put_pwq_unlocked(struct pool_workqueue *pwq)
+{
+	if (pwq) {
+		/*
+		 * As both pwqs and pools are sched-RCU protected, the
+		 * following lock operations are safe.
+		 */
+		spin_lock_irq(&pwq->pool->lock);
+		put_pwq(pwq);
+		spin_unlock_irq(&pwq->pool->lock);
+	}
+}
+
 static void pwq_activate_delayed_work(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);
@@ -3759,12 +3778,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 
 	mutex_unlock(&wq->mutex);
 
-	if (last_pwq) {
-		spin_lock_irq(&last_pwq->pool->lock);
-		put_pwq(last_pwq);
-		spin_unlock_irq(&last_pwq->pool->lock);
-	}
-
+	put_pwq_unlocked(last_pwq);
 	ret = 0;
 	/* fall through */
 out_free:
@@ -3979,16 +3993,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point.  Directly
-		 * access the first pwq and put the base ref.  As both pwqs
-		 * and pools are sched-RCU protected, the lock operations
-		 * are safe.  @wq will be freed when the last pwq is
-		 * released.
+		 * access the first pwq and put the base ref.  @wq will be
+		 * freed when the last pwq is released.
 		 */
 		pwq = list_first_entry(&wq->pwqs, struct pool_workqueue,
 				       pwqs_node);
-		spin_lock_irq(&pwq->pool->lock);
-		put_pwq(pwq);
-		spin_unlock_irq(&pwq->pool->lock);
+		put_pwq_unlocked(pwq);
 	}
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);