Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r--  kernel/rtmutex.c | 383
1 file changed, 366 insertions(+), 17 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 7256726..bd83ef0 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -8,6 +8,12 @@
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
*
+ * Adaptive Spinlocks:
+ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ * and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
+ *
* See Documentation/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
@@ -149,6 +155,12 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
}
#endif
+static inline void init_lists(struct rt_mutex *lock)
+{
+ if (unlikely(!lock->wait_list.node_list.prev))
+ plist_head_init(&lock->wait_list);
+}
+
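
init_lists() above relies on the fact that a zero-filled plist head has a NULL back pointer while an initialized one points to itself, so a lock whose wait list never went through plist_head_init() (for example one set up from a static initializer) gets it initialized lazily on first contention. A minimal stand-alone sketch of the same detection trick, using a plain doubly linked list instead of plist; all names here are illustrative, not kernel API:

#include <stddef.h>

struct model_list_head {
        struct model_list_head *prev, *next;
};

/* An initialized head points back to itself... */
static void model_list_init(struct model_list_head *h)
{
        h->prev = h->next = h;
}

/* ...so a NULL prev identifies a zero-filled head that was never
 * initialized, the same property init_lists() checks on
 * lock->wait_list before calling plist_head_init(). */
static void model_lazy_init(struct model_list_head *h)
{
        if (h->prev == NULL)
                model_list_init(h);
}
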
/*
* Calculate task priority from the waiter list priority
*
@@ -207,6 +219,14 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
+{
+ if (waiter->savestate)
+ wake_up_lock_sleeper(waiter->task);
+ else
+ wake_up_process(waiter->task);
+}
+
/*
* Max number of times we'll walk the boosting chain:
*/
@@ -365,13 +385,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (!rt_mutex_owner(lock)) {
+ struct rt_mutex_waiter *lock_top_waiter;
+
/*
* If the requeue above changed the top waiter, then we need
* to wake the new top waiter up to try to get the lock.
*/
-
- if (top_waiter != rt_mutex_top_waiter(lock))
- wake_up_process(rt_mutex_top_waiter(lock)->task);
+ lock_top_waiter = rt_mutex_top_waiter(lock);
+ if (top_waiter != lock_top_waiter)
+ rt_mutex_wake_waiter(lock_top_waiter);
raw_spin_unlock(&lock->wait_lock);
goto out_put_task;
}
@@ -431,6 +453,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return ret;
}
+
+#define STEAL_NORMAL 0
+#define STEAL_LATERAL 1
+
+/*
+ * Note that RT tasks are excluded from lateral-steals to prevent the
+ * introduction of an unbounded latency
+ */
+static inline int lock_is_stealable(struct task_struct *task,
+ struct task_struct *pendowner, int mode)
+{
+ if (mode == STEAL_NORMAL || rt_task(task)) {
+ if (task->prio >= pendowner->prio)
+ return 0;
+ } else if (task->prio > pendowner->prio)
+ return 0;
+ return 1;
+}
+
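
lock_is_stealable() encodes the two stealing policies: under STEAL_NORMAL (and always for RT tasks) a contender must have strictly higher priority than the top waiter to take the lock, while STEAL_LATERAL also lets a non-RT contender of equal priority in, which is what the spinlock slow path further down uses. A stand-alone model of the decision with plain integers standing in for task->prio (lower value means higher priority, as in the kernel); the names are illustrative:

#include <stdio.h>
#include <stdbool.h>

#define STEAL_NORMAL  0
#define STEAL_LATERAL 1

/* Stand-alone model of lock_is_stealable(); "task_is_rt" stands in
 * for rt_task(task), "top_waiter_prio" for the top waiter's prio. */
static bool model_is_stealable(int task_prio, bool task_is_rt,
                               int top_waiter_prio, int mode)
{
        if (mode == STEAL_NORMAL || task_is_rt)
                return task_prio < top_waiter_prio;  /* strictly higher prio */
        return task_prio <= top_waiter_prio;         /* lateral: ties allowed */
}

int main(void)
{
        /* Two SCHED_OTHER tasks at the same priority (120). */
        printf("normal,  equal prio: %d\n",
               model_is_stealable(120, false, 120, STEAL_NORMAL));  /* 0 */
        printf("lateral, equal prio: %d\n",
               model_is_stealable(120, false, 120, STEAL_LATERAL)); /* 1 */
        /* An RT task never gets the lateral relaxation. */
        printf("lateral, rt equal  : %d\n",
               model_is_stealable(50, true, 50, STEAL_LATERAL));    /* 0 */
        return 0;
}
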
/*
* Try to take an rt-mutex
*
@@ -440,8 +481,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* @task: the task which wants to acquire the lock
* @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
*/
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter)
+static int
+__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter, int mode)
{
/*
* We have to be careful here if the atomic speedups are
@@ -474,12 +516,14 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
* 3) it is top waiter
*/
if (rt_mutex_has_waiters(lock)) {
- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
- if (!waiter || waiter != rt_mutex_top_waiter(lock))
- return 0;
- }
+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+
+ if (task != pown && !lock_is_stealable(task, pown, mode))
+ return 0;
}
+ /* We got the lock. */
+
if (waiter || rt_mutex_has_waiters(lock)) {
unsigned long flags;
struct rt_mutex_waiter *top;
@@ -504,7 +548,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
- /* We got the lock. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task);
@@ -514,6 +557,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
return 1;
}
+static inline int
+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
+{
+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
+}
+
/*
* Task blocks on lock.
*
@@ -663,7 +713,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* long as we hold lock->wait_lock. The waiter task needs to
* acquire it in order to dequeue the waiter.
*/
- wake_up_process(waiter->task);
+ rt_mutex_wake_waiter(waiter);
}
/*
@@ -743,19 +793,317 @@ void rt_mutex_adjust_pi(struct task_struct *task)
return;
}
next_lock = waiter->lock;
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * preemptible spin_lock functions:
+ */
+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ might_sleep();
+
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
+ rt_mutex_deadlock_account_lock(lock, current);
+ else
+ slowfn(lock);
+}
+
+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+ rt_mutex_deadlock_account_unlock(current);
+ else
+ slowfn(lock);
+}
+
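
rt_spin_lock_fastlock()/rt_spin_lock_fastunlock() keep the uncontended case down to a single compare-and-swap on the owner word: NULL -> current on acquire, current -> NULL on release, with everything else pushed to the slow path. A stand-alone model of that fast path using C11 atomics (illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct model_rtlock {
        _Atomic(void *) owner;
};

/* Acquire fast path: succeeds only if nobody owns the lock. */
static bool model_fast_lock(struct model_rtlock *l, void *self)
{
        void *expected = NULL;
        return atomic_compare_exchange_strong(&l->owner, &expected, self);
}

/* Release fast path: succeeds only if we still own the lock and no
 * waiter state has been recorded; otherwise the slow path runs. */
static bool model_fast_unlock(struct model_rtlock *l, void *self)
{
        void *expected = self;
        return atomic_compare_exchange_strong(&l->owner, &expected, NULL);
}
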
+#ifdef CONFIG_SMP
+/*
+ * Note that owner is a speculative pointer and dereferencing relies
+ * on rcu_read_lock() and the check against the lock owner.
+ */
+static int adaptive_wait(struct rt_mutex *lock,
+ struct task_struct *owner)
+{
+ int res = 0;
+
+ rcu_read_lock();
+ for (;;) {
+ if (owner != rt_mutex_owner(lock))
+ break;
+ /*
+ * Ensure that owner->on_cpu is dereferenced _after_
+ * checking the above to be valid.
+ */
+ barrier();
+ if (!owner->on_cpu) {
+ res = 1;
+ break;
+ }
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return res;
+}
+#else
+static int adaptive_wait(struct rt_mutex *lock,
+ struct task_struct *orig_owner)
+{
+ return 1;
+}
+#endif
+
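
adaptive_wait() is the heart of the adaptive spinning: keep spinning while the lock is still held by the same owner and that owner is executing on a CPU, give up (return 1, i.e. go to sleep) as soon as the owner is preempted, and return 0 when the owner changes so the caller retries the acquisition immediately. A rough stand-alone model of the same decision loop, using C11 atomics in place of the lock owner field and owner->on_cpu (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct model_task {
        atomic_bool on_cpu;                    /* stands in for task->on_cpu */
};

struct model_lock {
        _Atomic(struct model_task *) owner;
};

/* Returns true when the caller should block (owner not running),
 * false when the owner changed and another acquisition attempt is
 * worth making right away. */
static bool model_adaptive_wait(struct model_lock *lock,
                                struct model_task *owner)
{
        for (;;) {
                if (atomic_load(&lock->owner) != owner)
                        return false;
                if (!atomic_load(&owner->on_cpu))
                        return true;
                /* busy-spin; the kernel uses cpu_relax() here */
        }
}
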
+# define pi_lock(lock) raw_spin_lock_irq(lock)
+# define pi_unlock(lock) raw_spin_unlock_irq(lock)
+
+/*
+ * Slow path lock function spin_lock style: this variant is very
+ * careful not to miss any non-lock wakeups.
+ *
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+{
+ struct task_struct *lock_owner, *self = current;
+ struct rt_mutex_waiter waiter, *top_waiter;
+ int ret;
+
+ rt_mutex_init_waiter(&waiter, true);
+
+ raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
+
+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
+ raw_spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ BUG_ON(rt_mutex_owner(lock) == self);
+
+ /*
+ * We save whatever state the task is in and we'll restore it
+ * after acquiring the lock taking real wakeups into account
+ * as well. We are serialized via pi_lock against wakeups. See
+ * try_to_wake_up().
+ */
+ pi_lock(&self->pi_lock);
+ self->saved_state = self->state;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ pi_unlock(&self->pi_lock);
+
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+ BUG_ON(ret);
+
+ for (;;) {
+ /* Try to acquire the lock again. */
+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
+ break;
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ lock_owner = rt_mutex_owner(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+ schedule_rt_mutex(lock);
+
+ raw_spin_lock(&lock->wait_lock);
+
+ pi_lock(&self->pi_lock);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ pi_unlock(&self->pi_lock);
+ }
+
+ /*
+ * Restore the task state to current->saved_state. We set it
+ * to the original state above and the try_to_wake_up() code
+ * has possibly updated it when a real (non-rtmutex) wakeup
+ * happened while we were blocked. Clear saved_state so
+ * try_to_wake_up() does not get confused.
+ */
+ pi_lock(&self->pi_lock);
+ __set_current_state(self->saved_state);
+ self->saved_state = TASK_RUNNING;
+ pi_unlock(&self->pi_lock);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up:
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+ BUG_ON(!plist_node_empty(&waiter.list_entry));
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_free_waiter(&waiter);
+}
+
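
The comments above describe the saved_state mechanism: the spinlock slow path parks the task's current state in saved_state, sleeps in TASK_UNINTERRUPTIBLE, and restores whatever try_to_wake_up() left in saved_state once the lock is acquired, so a genuine wakeup (a signal, a waitqueue wake_up()) arriving while the task is blocked on the converted spinlock is not lost. A deliberately simplified stand-alone model of that handshake; it ignores the wake-state masks and pi_lock serialization the real code uses, and every name is illustrative:

#include <stdio.h>
#include <stdbool.h>

#define TASK_RUNNING         0
#define TASK_INTERRUPTIBLE   1
#define TASK_UNINTERRUPTIBLE 2

struct model_task {
        int  state;
        int  saved_state;
        bool blocked_on_spinlock;
};

/* rt_spin_lock_slowlock(): stash the caller's state, then sleep
 * uninterruptibly until the lock becomes available. */
static void model_block_on_spinlock(struct model_task *t)
{
        t->saved_state = t->state;
        t->state = TASK_UNINTERRUPTIBLE;
        t->blocked_on_spinlock = true;
}

/* A "real" (non-rtmutex) wakeup, e.g. a signal: while the task is
 * blocked on the spinlock it cannot run yet, so the wakeup is parked
 * in saved_state and replayed once the lock is taken. */
static void model_real_wakeup(struct model_task *t)
{
        if (t->blocked_on_spinlock)
                t->saved_state = TASK_RUNNING;
        else
                t->state = TASK_RUNNING;
}

/* Lock acquired: restore whatever state the wakeups above left us. */
static void model_got_spinlock(struct model_task *t)
{
        t->state = t->saved_state;
        t->saved_state = TASK_RUNNING;
        t->blocked_on_spinlock = false;
}

int main(void)
{
        struct model_task t = { .state = TASK_INTERRUPTIBLE };

        model_block_on_spinlock(&t);  /* was sleeping interruptibly      */
        model_real_wakeup(&t);        /* signal arrives during the block */
        model_got_spinlock(&t);       /* wakeup is replayed, not lost    */

        printf("state after acquiring the lock: %d\n", t.state); /* 0 */
        return 0;
}
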
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+ raw_spin_lock(&lock->wait_lock);
+
+ debug_rt_mutex_unlock(lock);
+
+ rt_mutex_deadlock_account_unlock(current);
+
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ raw_spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ wakeup_next_waiter(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ /* Undo pi boosting when necessary */
+ rt_mutex_adjust_prio(current);
+}
+
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(__rt_spin_lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
+
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(__rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), we lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock(lock);
+ spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+int __lockfunc rt_spin_trylock(spinlock_t *lock)
+{
+ int ret;
+
+ migrate_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret) {
+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ } else
+ local_bh_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+{
+ int ret;
+
+ *flags = 0;
+ migrate_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+
+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+ migrate_disable();
+ rt_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ rt_spin_unlock(lock);
+ migrate_enable();
+ return 0;
+}
+EXPORT_SYMBOL(atomic_dec_and_spin_lock);
+
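
atomic_dec_and_spin_lock() follows the atomic_dec_and_lock() pattern: drop the reference locklessly unless this might be the final put, and in that case return with the lock held and migration disabled so the caller can tear the object down. A hedged usage sketch follows; the object type, field names and the explicit rt_spin_unlock()/migrate_enable() pairing are assumptions for illustration, chosen to mirror the failure path inside the function above:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted, cached object; names are illustrative. */
struct cached_obj {
        atomic_t         refcnt;
        spinlock_t       *cache_lock;   /* protects the cache list */
        struct list_head node;
};

static void cached_obj_put(struct cached_obj *obj)
{
        /*
         * Fast path: the count stayed above zero, no lock was taken.
         * Slow path: returns 1 with *cache_lock held and migration
         * disabled, exactly as atomic_dec_and_spin_lock() left it.
         */
        if (!atomic_dec_and_spin_lock(&obj->refcnt, obj->cache_lock))
                return;

        list_del(&obj->node);
        rt_spin_unlock(obj->cache_lock);
        migrate_enable();
        kfree(obj);
}
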
+void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+
+#endif /* PREEMPT_RT_FULL */
+
/**
* __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
* @state: the state the task should block in (TASK_INTERRUPTIBLE
- * or TASK_UNINTERRUPTIBLE)
+ * or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
@@ -831,9 +1179,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct rt_mutex_waiter waiter;
int ret = 0;
- debug_rt_mutex_init_waiter(&waiter);
+ rt_mutex_init_waiter(&waiter, false);
raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -888,6 +1237,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
int ret = 0;
raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -1157,12 +1507,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- raw_spin_lock_init(&lock->wait_lock);
plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
}
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+EXPORT_SYMBOL(__rt_mutex_init);
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -1177,7 +1526,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
- __rt_mutex_init(lock, NULL);
+ rt_mutex_init(lock);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);