| author | Scott Wood <scottwood@freescale.com> | 2014-05-14 18:19:12 (GMT) |
|---|---|---|
| committer | Scott Wood <scottwood@freescale.com> | 2014-05-14 18:37:18 (GMT) |
| commit | 86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch) | |
| tree | f99d2906b0eafca507f37289e68052fc105cc2dc /kernel/rtmutex.c | |
| parent | 07c8b57b111585a617b2b456497fc9b33c00743c (diff) | |
| download | linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz | |
Reset to 3.12.19
Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r-- | kernel/rtmutex.c | 743 |
1 file changed, 41 insertions(+), 702 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 4057bc6..0dd6aec 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -8,12 +8,6 @@ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen * - * Adaptive Spinlocks: - * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, - * and Peter Morreale, - * Adaptive Spinlocks simplification: - * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com> - * * See Documentation/rt-mutex-design.txt for details. */ #include <linux/spinlock.h> @@ -21,7 +15,6 @@ #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/timer.h> -#include <linux/ww_mutex.h> #include "rtmutex_common.h" @@ -75,12 +68,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) clear_rt_mutex_waiters(lock); } -static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) -{ - return waiter && waiter != PI_WAKEUP_INPROGRESS && - waiter != PI_REQUEUE_INPROGRESS; -} - /* * We can speed up the acquire/release, if the architecture * supports cmpxchg and if there's no debugging state to be set up @@ -104,12 +91,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) } #endif -static inline void init_lists(struct rt_mutex *lock) -{ - if (unlikely(!lock->wait_list.node_list.prev)) - plist_head_init(&lock->wait_list); -} - /* * Calculate task priority from the waiter list priority * @@ -126,18 +107,6 @@ int rt_mutex_getprio(struct task_struct *task) } /* - * Called by sched_setscheduler() to check whether the priority change - * is overruled by a possible priority boosting. - */ -int rt_mutex_check_prio(struct task_struct *task, int newprio) -{ - if (!task_has_pi_waiters(task)) - return 0; - - return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio; -} - -/* * Adjust the priority of a task, after its pi_waiters got modified. * * This can be both boosting and unboosting. task->pi_lock must be held. @@ -168,14 +137,6 @@ static void rt_mutex_adjust_prio(struct task_struct *task) raw_spin_unlock_irqrestore(&task->pi_lock, flags); } -static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) -{ - if (waiter->savestate) - wake_up_lock_sleeper(waiter->task); - else - wake_up_process(waiter->task); -} - /* * Max number of times we'll walk the boosting chain: */ @@ -249,7 +210,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ - if (!rt_mutex_real_waiter(waiter)) + if (!waiter) goto out_unlock_pi; /* @@ -300,15 +261,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* Release the task */ raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!rt_mutex_owner(lock)) { - struct rt_mutex_waiter *lock_top_waiter; - /* * If the requeue above changed the top waiter, then we need * to wake the new top waiter up to try to get the lock. 
*/ - lock_top_waiter = rt_mutex_top_waiter(lock); - if (top_waiter != lock_top_waiter) - rt_mutex_wake_waiter(lock_top_waiter); + + if (top_waiter != rt_mutex_top_waiter(lock)) + wake_up_process(rt_mutex_top_waiter(lock)->task); raw_spin_unlock(&lock->wait_lock); goto out_put_task; } @@ -353,25 +312,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, return ret; } - -#define STEAL_NORMAL 0 -#define STEAL_LATERAL 1 - -/* - * Note that RT tasks are excluded from lateral-steals to prevent the - * introduction of an unbounded latency - */ -static inline int lock_is_stealable(struct task_struct *task, - struct task_struct *pendowner, int mode) -{ - if (mode == STEAL_NORMAL || rt_task(task)) { - if (task->prio >= pendowner->prio) - return 0; - } else if (task->prio > pendowner->prio) - return 0; - return 1; -} - /* * Try to take an rt-mutex * @@ -381,9 +321,8 @@ static inline int lock_is_stealable(struct task_struct *task, * @task: the task which wants to acquire the lock * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) */ -static int -__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter, int mode) +static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) { /* * We have to be careful here if the atomic speedups are @@ -416,14 +355,12 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, * 3) it is top waiter */ if (rt_mutex_has_waiters(lock)) { - struct task_struct *pown = rt_mutex_top_waiter(lock)->task; - - if (task != pown && !lock_is_stealable(task, pown, mode)) - return 0; + if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { + if (!waiter || waiter != rt_mutex_top_waiter(lock)) + return 0; + } } - /* We got the lock. */ - if (waiter || rt_mutex_has_waiters(lock)) { unsigned long flags; struct rt_mutex_waiter *top; @@ -448,6 +385,7 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, raw_spin_unlock_irqrestore(&task->pi_lock, flags); } + /* We got the lock. */ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task); @@ -457,13 +395,6 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, return 1; } -static inline int -try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) -{ - return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); -} - /* * Task blocks on lock. * @@ -482,23 +413,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, int chain_walk = 0, res; raw_spin_lock_irqsave(&task->pi_lock, flags); - - /* - * In the case of futex requeue PI, this will be a proxy - * lock. The task will wake unaware that it is enqueueed on - * this lock. Avoid blocking on two locks and corrupting - * pi_blocked_on via the PI_WAKEUP_INPROGRESS - * flag. futex_wait_requeue_pi() sets this when it wakes up - * before requeue (due to a signal or timeout). Do not enqueue - * the task if PI_WAKEUP_INPROGRESS is set. 
- */ - if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return -EAGAIN; - } - - BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); - __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; @@ -523,7 +437,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); - if (rt_mutex_real_waiter(owner->pi_blocked_on)) + if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } @@ -578,7 +492,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) raw_spin_unlock_irqrestore(¤t->pi_lock, flags); - rt_mutex_wake_waiter(waiter); + wake_up_process(waiter->task); } /* @@ -617,7 +531,7 @@ static void remove_waiter(struct rt_mutex *lock, } __rt_mutex_adjust_prio(owner); - if (rt_mutex_real_waiter(owner->pi_blocked_on)) + if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); @@ -651,371 +565,23 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; - if (!rt_mutex_real_waiter(waiter) || - waiter->list_entry.prio == task->prio) { + if (!waiter || waiter->list_entry.prio == task->prio) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); - raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } -#ifdef CONFIG_PREEMPT_RT_FULL -/* - * preemptible spin_lock functions: - */ -static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, - void (*slowfn)(struct rt_mutex *lock)) -{ - might_sleep(); - - if (likely(rt_mutex_cmpxchg(lock, NULL, current))) - rt_mutex_deadlock_account_lock(lock, current); - else - slowfn(lock); -} - -static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, - void (*slowfn)(struct rt_mutex *lock)) -{ - if (likely(rt_mutex_cmpxchg(lock, current, NULL))) - rt_mutex_deadlock_account_unlock(current); - else - slowfn(lock); -} - -#ifdef CONFIG_SMP -/* - * Note that owner is a speculative pointer and dereferencing relies - * on rcu_read_lock() and the check against the lock owner. - */ -static int adaptive_wait(struct rt_mutex *lock, - struct task_struct *owner) -{ - int res = 0; - - rcu_read_lock(); - for (;;) { - if (owner != rt_mutex_owner(lock)) - break; - /* - * Ensure that owner->on_cpu is dereferenced _after_ - * checking the above to be valid. - */ - barrier(); - if (!owner->on_cpu) { - res = 1; - break; - } - cpu_relax(); - } - rcu_read_unlock(); - return res; -} -#else -static int adaptive_wait(struct rt_mutex *lock, - struct task_struct *orig_owner) -{ - return 1; -} -#endif - -# define pi_lock(lock) raw_spin_lock_irq(lock) -# define pi_unlock(lock) raw_spin_unlock_irq(lock) - -/* - * Slow path lock function spin_lock style: this variant is very - * careful not to miss any non-lock wakeups. - * - * We store the current state under p->pi_lock in p->saved_state and - * the try_to_wake_up() code handles this accordingly. 
- */ -static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) -{ - struct task_struct *lock_owner, *self = current; - struct rt_mutex_waiter waiter, *top_waiter; - int ret; - - rt_mutex_init_waiter(&waiter, true); - - raw_spin_lock(&lock->wait_lock); - init_lists(lock); - - if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { - raw_spin_unlock(&lock->wait_lock); - return; - } - - BUG_ON(rt_mutex_owner(lock) == self); - - /* - * We save whatever state the task is in and we'll restore it - * after acquiring the lock taking real wakeups into account - * as well. We are serialized via pi_lock against wakeups. See - * try_to_wake_up(). - */ - pi_lock(&self->pi_lock); - self->saved_state = self->state; - __set_current_state(TASK_UNINTERRUPTIBLE); - pi_unlock(&self->pi_lock); - - ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); - BUG_ON(ret); - - for (;;) { - /* Try to acquire the lock again. */ - if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) - break; - - top_waiter = rt_mutex_top_waiter(lock); - lock_owner = rt_mutex_owner(lock); - - raw_spin_unlock(&lock->wait_lock); - - debug_rt_mutex_print_deadlock(&waiter); - - if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) - schedule_rt_mutex(lock); - - raw_spin_lock(&lock->wait_lock); - - pi_lock(&self->pi_lock); - __set_current_state(TASK_UNINTERRUPTIBLE); - pi_unlock(&self->pi_lock); - } - - /* - * Restore the task state to current->saved_state. We set it - * to the original state above and the try_to_wake_up() code - * has possibly updated it when a real (non-rtmutex) wakeup - * happened while we were blocked. Clear saved_state so - * try_to_wakeup() does not get confused. - */ - pi_lock(&self->pi_lock); - __set_current_state(self->saved_state); - self->saved_state = TASK_RUNNING; - pi_unlock(&self->pi_lock); - - /* - * try_to_take_rt_mutex() sets the waiter bit - * unconditionally. 
We might have to fix that up: - */ - fixup_rt_mutex_waiters(lock); - - BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); - BUG_ON(!plist_node_empty(&waiter.list_entry)); - - raw_spin_unlock(&lock->wait_lock); - - debug_rt_mutex_free_waiter(&waiter); -} - -/* - * Slow path to release a rt_mutex spin_lock style - */ -static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock) -{ - debug_rt_mutex_unlock(lock); - - rt_mutex_deadlock_account_unlock(current); - - if (!rt_mutex_has_waiters(lock)) { - lock->owner = NULL; - raw_spin_unlock(&lock->wait_lock); - return; - } - - wakeup_next_waiter(lock); - - raw_spin_unlock(&lock->wait_lock); - - /* Undo pi boosting.when necessary */ - rt_mutex_adjust_prio(current); -} - -static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) -{ - raw_spin_lock(&lock->wait_lock); - __rt_spin_lock_slowunlock(lock); -} - -static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock) -{ - int ret; - - do { - ret = raw_spin_trylock(&lock->wait_lock); - } while (!ret); - - __rt_spin_lock_slowunlock(lock); -} - -void __lockfunc rt_spin_lock(spinlock_t *lock) -{ - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); -} -EXPORT_SYMBOL(rt_spin_lock); - -void __lockfunc __rt_spin_lock(struct rt_mutex *lock) -{ - rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); -} -EXPORT_SYMBOL(__rt_spin_lock); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) -{ - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); - spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -} -EXPORT_SYMBOL(rt_spin_lock_nested); -#endif - -void __lockfunc rt_spin_unlock(spinlock_t *lock) -{ - /* NOTE: we always pass in '1' for nested, for simplicity */ - spin_release(&lock->dep_map, 1, _RET_IP_); - rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); -} -EXPORT_SYMBOL(rt_spin_unlock); - -void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock) -{ - /* NOTE: we always pass in '1' for nested, for simplicity */ - spin_release(&lock->dep_map, 1, _RET_IP_); - rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq); -} - -void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) -{ - rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); -} -EXPORT_SYMBOL(__rt_spin_unlock); - -/* - * Wait for the lock to get unlocked: instead of polling for an unlock - * (like raw spinlocks do), we lock and unlock, to force the kernel to - * schedule if there's contention: - */ -void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) -{ - spin_lock(lock); - spin_unlock(lock); -} -EXPORT_SYMBOL(rt_spin_unlock_wait); - -int __lockfunc rt_spin_trylock(spinlock_t *lock) -{ - int ret = rt_mutex_trylock(&lock->lock); - - if (ret) - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(rt_spin_trylock); - -int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) -{ - int ret; - - local_bh_disable(); - ret = rt_mutex_trylock(&lock->lock); - if (ret) { - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } else - local_bh_enable(); - return ret; -} -EXPORT_SYMBOL(rt_spin_trylock_bh); - -int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) -{ - int ret; - - *flags = 0; - ret = rt_mutex_trylock(&lock->lock); - if (ret) { - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } - return ret; -} -EXPORT_SYMBOL(rt_spin_trylock_irqsave); - -int 
atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) -{ - /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ - if (atomic_add_unless(atomic, -1, 1)) - return 0; - rt_spin_lock(lock); - if (atomic_dec_and_test(atomic)){ - migrate_disable(); - return 1; - } - rt_spin_unlock(lock); - return 0; -} -EXPORT_SYMBOL(atomic_dec_and_spin_lock); - -void -__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lockdep_init_map(&lock->dep_map, name, key, 0); -#endif -} -EXPORT_SYMBOL(__rt_spin_lock_init); - -#endif /* PREEMPT_RT_FULL */ - -#ifdef CONFIG_PREEMPT_RT_FULL -static inline int __sched -__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -{ - struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); - struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); - - if (!hold_ctx) - return 0; - - if (unlikely(ctx == hold_ctx)) - return -EALREADY; - - if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && - (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { -#ifdef CONFIG_DEBUG_MUTEXES - DEBUG_LOCKS_WARN_ON(ctx->contending_lock); - ctx->contending_lock = ww; -#endif - return -EDEADLK; - } - - return 0; -} -#else -static inline int __sched -__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -{ - BUG(); - return 0; -} - -#endif - /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take * @state: the state the task should block in (TASK_INTERRUPTIBLE - * or TASK_UNINTERRUPTIBLE) + * or TASK_UNINTERRUPTIBLE) * @timeout: the pre-initialized and started timer, or NULL for none * @waiter: the pre-initialized rt_mutex_waiter * @@ -1024,8 +590,7 @@ __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter, - struct ww_acquire_ctx *ww_ctx) + struct rt_mutex_waiter *waiter) { int ret = 0; @@ -1048,12 +613,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, break; } - if (ww_ctx && ww_ctx->acquired > 0) { - ret = __mutex_lock_check_stamp(lock, ww_ctx); - if (ret) - break; - } - raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); @@ -1067,102 +626,23 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } -static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, - struct ww_acquire_ctx *ww_ctx) -{ -#ifdef CONFIG_DEBUG_MUTEXES - /* - * If this WARN_ON triggers, you used ww_mutex_lock to acquire, - * but released with a normal mutex_unlock in this call. - * - * This should never happen, always use ww_mutex_unlock. - */ - DEBUG_LOCKS_WARN_ON(ww->ctx); - - /* - * Not quite done after calling ww_acquire_done() ? - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); - - if (ww_ctx->contending_lock) { - /* - * After -EDEADLK you tried to - * acquire a different ww_mutex? Bad! - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); - - /* - * You called ww_mutex_lock after receiving -EDEADLK, - * but 'forgot' to unlock everything else first? - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); - ww_ctx->contending_lock = NULL; - } - - /* - * Naughty, using a different class will lead to undefined behavior! 
- */ - DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); -#endif - ww_ctx->acquired++; -} - -#ifdef CONFIG_PREEMPT_RT_FULL -static void ww_mutex_account_lock(struct rt_mutex *lock, - struct ww_acquire_ctx *ww_ctx) -{ - struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); - struct rt_mutex_waiter *waiter; - - /* - * This branch gets optimized out for the common case, - * and is only important for ww_mutex_lock. - */ - ww_mutex_lock_acquired(ww, ww_ctx); - ww->ctx = ww_ctx; - - /* - * Give any possible sleeping processes the chance to wake up, - * so they can recheck if they have to back off. - */ - plist_for_each_entry(waiter, &lock->wait_list, list_entry) { - - /* XXX debug rt mutex waiter wakeup */ - - BUG_ON(waiter->lock != lock); - rt_mutex_wake_waiter(waiter); - } -} - -#else - -static void ww_mutex_account_lock(struct rt_mutex *lock, - struct ww_acquire_ctx *ww_ctx) -{ - BUG(); -} -#endif - /* * Slow path lock function: */ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock, struct ww_acquire_ctx *ww_ctx) + int detect_deadlock) { struct rt_mutex_waiter waiter; int ret = 0; - rt_mutex_init_waiter(&waiter, false); + debug_rt_mutex_init_waiter(&waiter); raw_spin_lock(&lock->wait_lock); - init_lists(lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { - if (ww_ctx) - ww_mutex_account_lock(lock, ww_ctx); raw_spin_unlock(&lock->wait_lock); return 0; } @@ -1179,14 +659,12 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock); if (likely(!ret)) - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx); + ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); set_current_state(TASK_RUNNING); if (unlikely(ret)) remove_waiter(lock, &waiter); - else if (ww_ctx) - ww_mutex_account_lock(lock, ww_ctx); /* * try_to_take_rt_mutex() sets the waiter bit @@ -1213,9 +691,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = 0; - if (!raw_spin_trylock(&lock->wait_lock)) - return ret; - init_lists(lock); + raw_spin_lock(&lock->wait_lock); if (likely(rt_mutex_owner(lock) != current)) { @@ -1266,33 +742,30 @@ rt_mutex_slowunlock(struct rt_mutex *lock) */ static inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, - int detect_deadlock, struct ww_acquire_ctx *ww_ctx, + int detect_deadlock, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock, - struct ww_acquire_ctx *ww_ctx)) + int detect_deadlock)) { if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 0; } else - return slowfn(lock, state, NULL, detect_deadlock, ww_ctx); + return slowfn(lock, state, NULL, detect_deadlock); } static inline int rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, int detect_deadlock, - struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock, - struct ww_acquire_ctx *ww_ctx)) + int detect_deadlock)) { if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 0; } else - return slowfn(lock, state, timeout, detect_deadlock, ww_ctx); + return slowfn(lock, state, timeout, detect_deadlock); } static inline int @@ -1325,19 +798,19 @@ void __sched rt_mutex_lock(struct rt_mutex *lock) { 
might_sleep(); - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, rt_mutex_slowlock); + rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_lock); /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible * - * @lock: the rt_mutex to be locked + * @lock: the rt_mutex to be locked * @detect_deadlock: deadlock detection on/off * * Returns: - * 0 on success - * -EINTR when interrupted by a signal + * 0 on success + * -EINTR when interrupted by a signal * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, @@ -1346,43 +819,22 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, might_sleep(); return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, - detect_deadlock, NULL, rt_mutex_slowlock); + detect_deadlock, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); /** - * rt_mutex_lock_killable - lock a rt_mutex killable - * - * @lock: the rt_mutex to be locked - * @detect_deadlock: deadlock detection on/off - * - * Returns: - * 0 on success - * -EINTR when interrupted by a signal - * -EDEADLK when the lock would deadlock (when deadlock detection is on) - */ -int __sched rt_mutex_lock_killable(struct rt_mutex *lock, - int detect_deadlock) -{ - might_sleep(); - - return rt_mutex_fastlock(lock, TASK_KILLABLE, - detect_deadlock, NULL, rt_mutex_slowlock); -} -EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); - -/** * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided * by the caller * - * @lock: the rt_mutex to be locked + * @lock: the rt_mutex to be locked * @timeout: timeout structure or NULL (no timeout) * @detect_deadlock: deadlock detection on/off * * Returns: - * 0 on success - * -EINTR when interrupted by a signal + * 0 on success + * -EINTR when interrupted by a signal * -ETIMEDOUT when the timeout expired * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ @@ -1393,7 +845,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, might_sleep(); return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - detect_deadlock, NULL, rt_mutex_slowlock); + detect_deadlock, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); @@ -1451,11 +903,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); void __rt_mutex_init(struct rt_mutex *lock, const char *name) { lock->owner = NULL; + raw_spin_lock_init(&lock->wait_lock); plist_head_init(&lock->wait_list); debug_rt_mutex_init(lock, name); } -EXPORT_SYMBOL(__rt_mutex_init); +EXPORT_SYMBOL_GPL(__rt_mutex_init); /** * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a @@ -1470,7 +923,7 @@ EXPORT_SYMBOL(__rt_mutex_init); void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { - rt_mutex_init(lock); + __rt_mutex_init(lock, NULL); debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); rt_mutex_deadlock_account_lock(lock, proxy_owner); @@ -1519,35 +972,6 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, return 1; } -#ifdef CONFIG_PREEMPT_RT_FULL - /* - * In PREEMPT_RT there's an added race. - * If the task, that we are about to requeue, times out, - * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue - * to skip this task. But right after the task sets - * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then - * block on the spin_lock(&hb->lock), which in RT is an rtmutex. 
- * This will replace the PI_WAKEUP_INPROGRESS with the actual - * lock that it blocks on. We *must not* place this task - * on this proxy lock in that case. - * - * To prevent this race, we first take the task's pi_lock - * and check if it has updated its pi_blocked_on. If it has, - * we assume that it woke up and we return -EAGAIN. - * Otherwise, we set the task's pi_blocked_on to - * PI_REQUEUE_INPROGRESS, so that if the task is waking up - * it will know that we are in the process of requeuing it. - */ - raw_spin_lock_irq(&task->pi_lock); - if (task->pi_blocked_on) { - raw_spin_unlock_irq(&task->pi_lock); - raw_spin_unlock(&lock->wait_lock); - return -EAGAIN; - } - task->pi_blocked_on = PI_REQUEUE_INPROGRESS; - raw_spin_unlock_irq(&task->pi_lock); -#endif - ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); if (ret && !rt_mutex_owner(lock)) { @@ -1617,7 +1041,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, set_current_state(TASK_INTERRUPTIBLE); - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); set_current_state(TASK_RUNNING); @@ -1634,88 +1058,3 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, return ret; } - -static inline int -ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -{ -#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH - unsigned tmp; - - if (ctx->deadlock_inject_countdown-- == 0) { - tmp = ctx->deadlock_inject_interval; - if (tmp > UINT_MAX/4) - tmp = UINT_MAX; - else - tmp = tmp*2 + tmp + tmp/2; - - ctx->deadlock_inject_interval = tmp; - ctx->deadlock_inject_countdown = tmp; - ctx->contending_lock = lock; - - ww_mutex_unlock(lock); - - return -EDEADLK; - } -#endif - - return 0; -} - -#ifdef CONFIG_PREEMPT_RT_FULL -int __sched -__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -{ - int ret; - - might_sleep(); - - mutex_acquire(&lock->base.dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); - if (ret) - mutex_release(&lock->base.dep_map, 1, _RET_IP_); - else if (!ret && ww_ctx->acquired > 1) - return ww_mutex_deadlock_injection(lock, ww_ctx); - - return ret; -} -EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); - -int __sched -__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -{ - int ret; - - might_sleep(); - - mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, - _RET_IP_); - ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); - if (ret) - mutex_release(&lock->base.dep_map, 1, _RET_IP_); - else if (!ret && ww_ctx->acquired > 1) - return ww_mutex_deadlock_injection(lock, ww_ctx); - - return ret; -} -EXPORT_SYMBOL_GPL(__ww_mutex_lock); - -void __sched ww_mutex_unlock(struct ww_mutex *lock) -{ - /* - * The unlocking fastpath is the 0->1 transition from 'locked' - * into 'unlocked' state: - */ - if (lock->ctx) { -#ifdef CONFIG_DEBUG_MUTEXES - DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); -#endif - if (lock->ctx->acquired > 0) - lock->ctx->acquired--; - lock->ctx = NULL; - } - - mutex_release(&lock->base.dep_map, 1, _RET_IP_); - rt_mutex_unlock(&lock->base.lock); -} -EXPORT_SYMBOL(ww_mutex_unlock); -#endif |
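The behavioral core of the revert above is easy to miss in the noise: with the RT-specific paths removed, `try_to_take_rt_mutex()` falls back to the plain priority check of vanilla 3.12.19 and the lateral-steal special case disappears. Below is a minimal user-space sketch of the two policies so the difference can be compiled and compared. It is an illustration only, not kernel code: `struct fake_task`, `is_rt` and `can_take_vanilla` are hypothetical stand-ins for `task_struct`, `rt_task()` and the reinstated `task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio` check.

```c
#include <stdbool.h>
#include <stdio.h>

enum steal_mode { STEAL_NORMAL, STEAL_LATERAL };

struct fake_task {
	int  prio;   /* lower value means higher priority, as in the kernel */
	bool is_rt;  /* stand-in for the kernel's rt_task() check */
};

/*
 * Mirror of the deleted lock_is_stealable() policy: under STEAL_LATERAL a
 * non-RT task of equal priority may also grab the lock, while RT tasks
 * always need strictly higher priority so their wakeup latency stays bounded.
 */
static bool lock_is_stealable(const struct fake_task *task,
			      const struct fake_task *top_waiter,
			      enum steal_mode mode)
{
	if (mode == STEAL_NORMAL || task->is_rt)
		return task->prio < top_waiter->prio;
	return task->prio <= top_waiter->prio;
}

/*
 * The rule this commit restores in try_to_take_rt_mutex(): a contended lock
 * goes only to a strictly higher-priority task, or to the queued top waiter
 * itself.
 */
static bool can_take_vanilla(const struct fake_task *task,
			     const struct fake_task *top_waiter,
			     bool task_is_top_waiter)
{
	if (task->prio >= top_waiter->prio)
		return task_is_top_waiter;
	return true;
}

int main(void)
{
	struct fake_task top  = { .prio = 50, .is_rt = false };
	struct fake_task same = { .prio = 50, .is_rt = false };

	/* Equal priority: lateral stealing allowed it, the vanilla rule does not. */
	printf("lateral steal: %d, vanilla: %d\n",
	       lock_is_stealable(&same, &top, STEAL_LATERAL),
	       can_take_vanilla(&same, &top, false));
	return 0;
}
```

Built with any C99 compiler, the sketch prints `lateral steal: 1, vanilla: 0` for two equal-priority SCHED_OTHER tasks, which is exactly the change of behavior: once this commit is applied, an equal-priority task can no longer slip in ahead of the queued top waiter.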