author     Nicholas Mc Guire <der.herr@hofr.at>    2014-01-02 09:19:15 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2014-05-14 18:38:27 (GMT)
commit     4e9c130da7f8ded41adc905e8e452c53866b9168 (patch)
tree       01c438e3da6b6bb939c6dc19a6fbcd076565fe55 /kernel/rt.c
parent     9e8cafa44cd98849c693febc2aa38a9ffefcc9b1 (diff)
download   linux-fsl-qoriq-4e9c130da7f8ded41adc905e8e452c53866b9168.tar.xz
read_lock migrate_disable pushdown to rt_read_lock
pushdown of migrate_disable/enable from read_*lock* to the rt_read_*lock*
API level

general mapping to mutexes:

read_*lock*
  `-> rt_read_*lock*
        `-> __spin_lock (the sleeping spin locks)
              `-> rt_mutex

The real read_lock* mapping:

read_lock_irqsave -.
read_lock_irq      `-> rt_read_lock_irqsave()
                        `-> read_lock ---------.
                                                \
read_lock_bh ------+                             \
                   `--> rt_read_lock()
                          if (rt_mutex_owner(lock) != current) {
                            `-> __rt_spin_lock()
                                  rt_spin_lock_fastlock()
                                    `-> rt_mutex_cmpxchg()
                            migrate_disable()
                          }
                          rwlock->read_depth++;

read_trylock mapping:

read_trylock
  `-> rt_read_trylock
        if (rt_mutex_owner(lock) != current) {
          `-> rt_mutex_trylock()
                rt_mutex_fasttrylock()
                  rt_mutex_cmpxchg()
          migrate_disable()
        }
        rwlock->read_depth++;

read_unlock* mapping:

read_unlock_bh --------+
read_unlock_irq -------+
read_unlock_irqrestore +
read_unlock -----------+
  `-> rt_read_unlock()
        if (--rwlock->read_depth == 0) {
          `-> __rt_spin_unlock()
                rt_spin_lock_fastunlock()
                  `-> rt_mutex_cmpxchg()
          migrate_enable()
        }

So calls to migrate_disable/enable() are better placed at the rt_read_*
level of lock/trylock/unlock, as all of the read_*lock* API has this as a
common path. In the rt_read* API of lock/trylock/unlock the nesting level
is already being recorded in rwlock->read_depth, so we can push the
migrate_disable/enable down to that level and condition it on read_depth
going from 0 to 1 -> migrate_disable and from 1 to 0 -> migrate_enable.
This eliminates the recursive calls that were needed when
migrate_disable/enable was done at the read_*lock* level.

The approach to read_*_bh also eliminates the concerns raised with regard
to API imbalances (read_lock_bh -> read_unlock + local_bh_enable).

Tested-by: Carsten Emde <C.Emde@osadl.org>
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
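[Editor's sketch] To make the read_depth-conditioned placement concrete, here is a
minimal C sketch of a recursive reader lock that pins migration only on the
outermost acquisition and release. It is not the kernel/rt.c code itself:
sketch_rwlock, sketch_read_lock and sketch_read_unlock are illustrative names,
rt_mutex_lock()/rt_mutex_unlock() stand in for the __rt_spin_lock()/
__rt_spin_unlock() fast paths, and header/visibility details (rt_mutex_owner()
is an RT-internal helper) are glossed over.

/*
 * Sketch only, assuming an RT kernel tree: illustrates migrate_disable()
 * on the 0 -> 1 read_depth transition and migrate_enable() on 1 -> 0,
 * not the actual kernel/rt.c implementation.
 */
struct sketch_rwlock {
	struct rt_mutex	lock;		/* underlying sleeping lock   */
	int		read_depth;	/* reader recursion count     */
};

static void sketch_read_lock(struct sketch_rwlock *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;

	/* recursive read locks succeed when current already owns the lock */
	if (rt_mutex_owner(lock) != current) {
		rt_mutex_lock(lock);	/* stands in for __rt_spin_lock()  */
		migrate_disable();	/* only on the outermost lock      */
	}
	rwlock->read_depth++;
}

static void sketch_read_unlock(struct sketch_rwlock *rwlock)
{
	/* drop the lock and re-enable migration only at the outermost unlock */
	if (--rwlock->read_depth == 0) {
		rt_mutex_unlock(&rwlock->lock);	/* stands in for __rt_spin_unlock() */
		migrate_enable();
	}
}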
Diffstat (limited to 'kernel/rt.c')
-rw-r--r--  kernel/rt.c  20
1 file changed, 13 insertions, 7 deletions
diff --git a/kernel/rt.c b/kernel/rt.c
index d712968..a9925c6 100644
--- a/kernel/rt.c
+++ b/kernel/rt.c
@@ -211,17 +211,19 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
 	 * but not when read_depth == 0 which means that the lock is
 	 * write locked.
 	 */
-	migrate_disable();
-	if (rt_mutex_owner(lock) != current)
+	if (rt_mutex_owner(lock) != current) {
 		ret = rt_mutex_trylock(lock);
-	else if (!rwlock->read_depth)
+		if (ret)
+			migrate_disable();
+
+	} else if (!rwlock->read_depth) {
 		ret = 0;
+	}
 
 	if (ret) {
 		rwlock->read_depth++;
 		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
-	} else
-		migrate_enable();
+	}
 
 	return ret;
 }
@@ -244,8 +246,10 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
 	/*
 	 * recursive read locks succeed when current owns the lock
 	 */
-	if (rt_mutex_owner(lock) != current)
+	if (rt_mutex_owner(lock) != current) {
 		__rt_spin_lock(lock);
+		migrate_disable();
+	}
 	rwlock->read_depth++;
 }
@@ -265,8 +269,10 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
 	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
 
 	/* Release the lock only when read_depth is down to 0 */
-	if (--rwlock->read_depth == 0)
+	if (--rwlock->read_depth == 0) {
 		__rt_spin_unlock(&rwlock->lock);
+		migrate_enable();
+	}
 }
 EXPORT_SYMBOL(rt_read_unlock);
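
[Editor's note] As a usage illustration, building on the hypothetical
sketch_read_lock/sketch_read_unlock helpers above: nested read-side sections
pay for the migration pinning only once, on the outermost lock/unlock pair,
which is exactly what made the recursive migrate_disable/enable calls at the
read_*lock* level unnecessary.

/* Hypothetical caller, sketch only. */
static void sketch_nested_reader(struct sketch_rwlock *rwlock)
{
	sketch_read_lock(rwlock);	/* read_depth 0 -> 1: migrate_disable()  */
	sketch_read_lock(rwlock);	/* read_depth 1 -> 2: no further disable */
	/* ... read-side critical section, task stays on this CPU ... */
	sketch_read_unlock(rwlock);	/* read_depth 2 -> 1: still pinned       */
	sketch_read_unlock(rwlock);	/* read_depth 1 -> 0: migrate_enable()   */
}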