author      Nicholas Mc Guire <der.herr@hofr.at>      2014-01-17 19:41:58 (GMT)
committer   Scott Wood <scottwood@freescale.com>      2014-05-14 18:38:02 (GMT)
commit      7f11536abb58e277f1c29eef8b5e40b3c11beee5 (patch)
tree        a9ca07c7ae783ebdf31eeb06a5ac6f59c4b9ef82 /include/linux
parent      c976fc4bc616420e1554191863628a7c08c64409 (diff)
download    linux-fsl-qoriq-7f11536abb58e277f1c29eef8b5e40b3c11beee5.tar.xz
use local spin_locks in local_lock
Drop the recursive call to migrate_disable/enable for the local_*lock* API,
as reported by Steven Rostedt.
local_lock will call migrate_disable via get_local_var - the call tree is:

  get_locked_var
   `-> local_lock(lvar)
         `-> __local_lock(&get_local_var(lvar));
               `--> # define get_local_var(var) (*({
                        migrate_disable();
                        &__get_cpu_var(var); }))
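For readability, here is a condensed sketch of that nesting. The macro bodies
are paraphrased from include/linux/locallock.h and the per-CPU headers of this
tree rather than quoted verbatim, so treat the details as illustrative:

/*
 * Sketch of the expansion above (simplified, not verbatim kernel code):
 * get_local_var() pins the task to its CPU via migrate_disable() and hands
 * back the per-CPU instance; get_locked_var() additionally takes the
 * per-CPU lock through local_lock().
 */
#define get_local_var(var) (*({					\
		migrate_disable();	/* pins the task once */	\
		&__get_cpu_var(var);				\
	}))

#define get_locked_var(lvar, var)				\
	(*({						\
		local_lock(lvar);	/* -> __local_lock(&get_local_var(lvar)) */ \
		&__get_cpu_var(var);			\
	}))

/*
 * Before this change __local_lock() then called spin_lock(), and on
 * PREEMPT_RT the sleeping spinlock disables migration again; that is the
 * recursive migrate_disable/enable being dropped here.
 */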
thus there should be no need to call migrate_disable/enable recursively in
spin_trylock/lock/unlock. This patch adds a spin_trylock_local and replaces
the migration-disabling calls with the local calls.
This patch is incomplete, as it does not yet cover the _irq/_irqsave variants
with local locks. It requires the API cleanup in kernel/softirq.c, or it would
break softirq_lock/unlock with respect to migration.
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/locallock.h   18
1 file changed, 14 insertions, 4 deletions
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
index e7bd8be..eb338ce 100644
--- a/include/linux/locallock.h
+++ b/include/linux/locallock.h
@@ -36,10 +36,20 @@ struct local_irq_lock {
 		spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
 	} while (0)
 
+/*
+ * spin_lock|trylock|unlock_local flavour that does not migrate disable
+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var
+ * already takes care of the migrate_disable/enable
+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
+ */
+# define spin_lock_local(lock)		spin_lock(lock)
+# define spin_trylock_local(lock)	spin_trylock(lock)
+# define spin_unlock_local(lock)	spin_unlock(lock)
+
 static inline void __local_lock(struct local_irq_lock *lv)
 {
 	if (lv->owner != current) {
-		spin_lock(&lv->lock);
+		spin_lock_local(&lv->lock);
 		LL_WARN(lv->owner);
 		LL_WARN(lv->nestcnt);
 		lv->owner = current;
@@ -52,7 +62,7 @@ static inline void __local_lock(struct local_irq_lock *lv)
 
 static inline int __local_trylock(struct local_irq_lock *lv)
 {
-	if (lv->owner != current && spin_trylock(&lv->lock)) {
+	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
 		LL_WARN(lv->owner);
 		LL_WARN(lv->nestcnt);
 		lv->owner = current;
@@ -79,7 +89,7 @@ static inline void __local_unlock(struct local_irq_lock *lv)
 		return;
 
 	lv->owner = NULL;
-	spin_unlock(&lv->lock);
+	spin_unlock_local(&lv->lock);
 }
 
 #define local_unlock(lvar)					\
@@ -211,7 +221,7 @@ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
 		&__get_cpu_var(var);				\
 	}))
 
-#define put_locked_var(lvar, var)	local_unlock(lvar)
+#define put_locked_var(lvar, var)	local_unlock(lvar);
 
 #define local_lock_cpu(lvar)						\
 	({								\
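As a usage illustration (not part of this patch), code that keeps per-CPU
state under a local lock typically goes through the get_locked_var/
put_locked_var pair touched in the last hunk. The lock, structure, and
function names below are hypothetical:

#include <linux/locallock.h>
#include <linux/percpu.h>

/* Hypothetical names, for illustration only. */
struct example_pcpu {
	unsigned long count;
};

static DEFINE_LOCAL_IRQ_LOCK(example_lock);
static DEFINE_PER_CPU(struct example_pcpu, example_data);

static void example_count(void)
{
	struct example_pcpu *p;

	/*
	 * Takes example_lock for this CPU and pins the task via the
	 * migrate_disable() hidden in get_local_var(); the caller never
	 * touches the spinlock or the migration state directly.
	 */
	p = &get_locked_var(example_lock, example_data);
	p->count++;
	put_locked_var(example_lock, example_data);
}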