diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2012-09-28 09:49:42 (GMT) |
---|---|---|
committer | Scott Wood <scottwood@freescale.com> | 2013-04-04 22:09:37 (GMT) |
commit | ae70961f02eea901b8a68fec0f664e2b6e2431d7 (patch) | |
tree | 71204c4d681e41159f61c03cceedf41da2a63e5f /kernel | |
parent | 22177270e20d57551701d2898c34415838a1ed23 (diff) | |
download | linux-fsl-qoriq-ae70961f02eea901b8a68fec0f664e2b6e2431d7.tar.xz |
rt: rwsem/rwlock: lockdep annotations
rwlocks and rwsems on RT do not allow multiple readers. Annotate the
lockdep acquire functions accordingly.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/rt.c | 53 |
1 file changed, 32 insertions, 21 deletions
diff --git a/kernel/rt.c b/kernel/rt.c index 92a16e1..433ae42 100644 --- a/kernel/rt.c +++ b/kernel/rt.c @@ -216,15 +216,17 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock) * write locked. */ migrate_disable(); - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { ret = rt_mutex_trylock(lock); - else if (!rwlock->read_depth) + if (ret) + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + } else if (!rwlock->read_depth) { ret = 0; + } - if (ret) { + if (ret) rwlock->read_depth++; - rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); - } else + else migrate_enable(); return ret; @@ -242,13 +244,13 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock) { struct rt_mutex *lock = &rwlock->lock; - rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); - /* * recursive read locks succeed when current owns the lock */ - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); __rt_spin_lock(lock); + } rwlock->read_depth++; } @@ -264,11 +266,11 @@ EXPORT_SYMBOL(rt_write_unlock); void __lockfunc rt_read_unlock(rwlock_t *rwlock) { - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - /* Release the lock only when read_depth is down to 0 */ - if (--rwlock->read_depth == 0) + if (--rwlock->read_depth == 0) { + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); __rt_spin_unlock(&rwlock->lock); + } } EXPORT_SYMBOL(rt_read_unlock); @@ -315,9 +317,10 @@ EXPORT_SYMBOL(rt_up_write); void rt_up_read(struct rw_semaphore *rwsem) { - rwsem_release(&rwsem->dep_map, 1, _RET_IP_); - if (--rwsem->read_depth == 0) + if (--rwsem->read_depth == 0) { + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); rt_mutex_unlock(&rwsem->lock); + } } EXPORT_SYMBOL(rt_up_read); @@ -356,6 +359,13 @@ void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) } EXPORT_SYMBOL(rt_down_write_nested); +void rt_down_write_nested_lock(struct rw_semaphore *rwsem, + struct lockdep_map *nest) +{ + rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} + int rt_down_read_trylock(struct rw_semaphore *rwsem) { struct rt_mutex *lock = &rwsem->lock; @@ -366,15 +376,16 @@ int rt_down_read_trylock(struct rw_semaphore *rwsem) * but not when read_depth == 0 which means that the rwsem is * write locked. */ - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { ret = rt_mutex_trylock(&rwsem->lock); - else if (!rwsem->read_depth) + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + } else if (!rwsem->read_depth) { ret = 0; + } - if (ret) { + if (ret) rwsem->read_depth++; - rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); - } return ret; } EXPORT_SYMBOL(rt_down_read_trylock); @@ -383,10 +394,10 @@ static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) { struct rt_mutex *lock = &rwsem->lock; - rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); - - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { + rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); rt_mutex_lock(&rwsem->lock); + } rwsem->read_depth++; } |