author     Thomas Gleixner <tglx@linutronix.de>    2012-09-28 09:49:42 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2015-02-13 22:21:07 (GMT)
commit     a4dbe1db8e49816d2419438bde59d8e30b36a00b (patch)
tree       cae38857a686dd297e4dea1b6049b59d511f6b25 /kernel
parent     3fe62a9bc6cbe636d8707b18979113b8bd1bb301 (diff)
download   linux-fsl-qoriq-a4dbe1db8e49816d2419438bde59d8e30b36a00b.tar.xz
rt: rwsem/rwlock: lockdep annotations
rwlocks and rwsems on RT do not allow multiple readers. Annotate the
lockdep acquire functions accordingly.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
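For context, the two annotation families the patch switches between tell lockdep
different things: the *_acquire_read() wrappers record a shared (reader)
acquisition, while the plain *_acquire() wrappers record an exclusive one. A
minimal sketch of the wrappers involved, assuming the include/linux/lockdep.h
definitions of that era (paraphrased; the exact variant arguments may differ):

/*
 * Sketch only, not part of this patch.  The shared/exclusive distinction
 * ends up in the "read" argument of lock_acquire():
 * 0 = exclusive, 1 = shared, 2 = shared and recursive.
 */
#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

Since an RT reader takes the underlying rt_mutex exclusively and owner recursion
is tracked with read_depth, the annotations are moved so that lockdep sees
exactly one exclusive acquire on the outermost read lock and one release when
read_depth drops back to zero.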
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rt.c | 44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/kernel/rt.c b/kernel/rt.c
index a9925c6..5d17727 100644
--- a/kernel/rt.c
+++ b/kernel/rt.c
@@ -213,17 +213,16 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
 	 */
 	if (rt_mutex_owner(lock) != current) {
 		ret = rt_mutex_trylock(lock);
-		if (ret)
+		if (ret) {
+			rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
 			migrate_disable();
-
+		}
 	} else if (!rwlock->read_depth) {
 		ret = 0;
 	}
-	if (ret) {
+	if (ret)
 		rwlock->read_depth++;
-		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
-	}
 	return ret;
 }
@@ -241,12 +240,11 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
 {
 	struct rt_mutex *lock = &rwlock->lock;
-	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
-
 	/*
 	 * recursive read locks succeed when current owns the lock
 	 */
 	if (rt_mutex_owner(lock) != current) {
+		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
 		__rt_spin_lock(lock);
 		migrate_disable();
 	}
@@ -266,10 +264,9 @@ EXPORT_SYMBOL(rt_write_unlock);
 void __lockfunc rt_read_unlock(rwlock_t *rwlock)
 {
-	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-
 	/* Release the lock only when read_depth is down to 0 */
 	if (--rwlock->read_depth == 0) {
+		rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
 		__rt_spin_unlock(&rwlock->lock);
 		migrate_enable();
 	}
@@ -319,9 +316,10 @@ EXPORT_SYMBOL(rt_up_write);
 void rt_up_read(struct rw_semaphore *rwsem)
 {
-	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-	if (--rwsem->read_depth == 0)
+	if (--rwsem->read_depth == 0) {
+		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
 		rt_mutex_unlock(&rwsem->lock);
+	}
 }
 EXPORT_SYMBOL(rt_up_read);
@@ -360,6 +358,13 @@ void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
 }
 EXPORT_SYMBOL(rt_down_write_nested);
+void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
+			       struct lockdep_map *nest)
+{
+	rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
+}
+
 int rt_down_read_trylock(struct rw_semaphore *rwsem)
 {
 	struct rt_mutex *lock = &rwsem->lock;
@@ -370,15 +375,16 @@ int rt_down_read_trylock(struct rw_semaphore *rwsem)
 	 * but not when read_depth == 0 which means that the rwsem is
 	 * write locked.
 	 */
-	if (rt_mutex_owner(lock) != current)
+	if (rt_mutex_owner(lock) != current) {
 		ret = rt_mutex_trylock(&rwsem->lock);
-	else if (!rwsem->read_depth)
+		if (ret)
+			rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	} else if (!rwsem->read_depth) {
 		ret = 0;
+	}
-	if (ret) {
+	if (ret)
 		rwsem->read_depth++;
-		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
-	}
 	return ret;
 }
 EXPORT_SYMBOL(rt_down_read_trylock);
@@ -387,10 +393,10 @@ static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
 {
 	struct rt_mutex *lock = &rwsem->lock;
-	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
-
-	if (rt_mutex_owner(lock) != current)
+	if (rt_mutex_owner(lock) != current) {
+		rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
 		rt_mutex_lock(&rwsem->lock);
+	}
 	rwsem->read_depth++;
 }
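
The newly added rt_down_write_nested_lock() takes the lockdep map of an outer
lock, which appears to parallel the mainline down_write_nest_lock() convention
of passing the outer lock's dep_map through rwsem_acquire_nest(). A hypothetical
caller sketch under that assumption (the structure, field names, and includes
below are illustrative, not from this tree):

#include <linux/mutex.h>
#include <linux/rwsem.h>

struct demo_object {				/* illustrative only */
	struct mutex		outer;		/* serializes nested sem locking */
	struct rw_semaphore	sem;
};

static void demo_lock(struct demo_object *obj)
{
	mutex_lock(&obj->outer);
	/*
	 * Tell lockdep that acquiring obj->sem while holding obj->outer is
	 * intentional rather than a potential deadlock.  dep_map only exists
	 * when CONFIG_DEBUG_LOCK_ALLOC is enabled, so a real caller would go
	 * through a wrapper macro that compiles the annotation away when
	 * lockdep is disabled.
	 */
	rt_down_write_nested_lock(&obj->sem, &obj->outer.dep_map);
}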