author     Chris Metcalf <cmetcalf@ezchip.com>    2015-04-28 17:00:42 (GMT)
committer  Chris Metcalf <cmetcalf@ezchip.com>    2015-04-29 02:43:16 (GMT)
commit     627ae54854edfbf29d5997015c190de22eef497f (patch)
tree       efa812a770bc06ce188f77262b5916e171ceaca1 /arch/tile/include/asm/spinlock_64.h
parent     14c3dec2a875d898262be79c0f85e5f2b70a71b0 (diff)
download   linux-627ae54854edfbf29d5997015c190de22eef497f.tar.xz
tile: use READ_ONCE() in arch_spin_is_locked()
This avoids potential issues if callers were to loop on these routines without some kind of memory barrier. Currently there are no such users in-tree, but it seems better safe than sorry.

Also, in the tilepro case we read "current" before "next", which gives us a slightly better guarantee that the lock was actually unlocked at least momentarily if we return claiming that it is not locked. As far as I know, however, none of the callers actually rely on this behavior.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
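For illustration, here is a minimal sketch (not part of the patch) of the kind of polling loop this change is meant to make safe. The wait_for_unlock() helper below is hypothetical; only arch_spin_is_locked(), arch_spinlock_t, READ_ONCE(), and the standard kernel cpu_relax() primitive are taken as given.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/spinlock.h>	/* arch_spinlock_t, arch_spin_is_locked() */
#include <asm/processor.h>	/* cpu_relax() */

/*
 * Hypothetical caller: spin until the lock is observed unlocked.
 * arch_spin_is_locked() is a static inline, so after inlining a plain
 * load of lock->lock could legally be hoisted out of this loop by the
 * compiler, spinning forever on a stale value.  With READ_ONCE() in
 * arch_spin_is_locked(), every iteration performs a fresh load.
 */
static void wait_for_unlock(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();	/* be polite to the other hardware threads */
}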
Diffstat (limited to 'arch/tile/include/asm/spinlock_64.h')
-rw-r--r--  arch/tile/include/asm/spinlock_64.h | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a12b9c..b9718fb 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -18,6 +18,8 @@
 #ifndef _ASM_TILE_SPINLOCK_64_H
 #define _ASM_TILE_SPINLOCK_64_H
 
+#include <linux/compiler.h>
+
 /* Shifts and masks for the various fields in "lock". */
 #define __ARCH_SPIN_CURRENT_SHIFT 17
 #define __ARCH_SPIN_NEXT_MASK 0x7fff
@@ -44,7 +46,8 @@ static inline u32 arch_spin_next(u32 val)
 /* The lock is locked if a task would have to wait to get it. */
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	u32 val = lock->lock;
+	/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
+	u32 val = READ_ONCE(lock->lock);
 	return arch_spin_current(val) != arch_spin_next(val);
 }
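
For reference, the two helpers used in the return statement above are defined earlier in this header but not shown in the hunk. A plausible sketch of what they do, assuming they simply split the 32-bit ticket-lock word using the __ARCH_SPIN_CURRENT_SHIFT and __ARCH_SPIN_NEXT_MASK constants visible in the first hunk (the bodies below are reconstructed for illustration, not part of this patch):

/*
 * Assumed sketch: the "current" ticket lives in the high bits of the
 * lock word and the "next" ticket in the low 15 bits, so the lock is
 * held (someone would have to wait) whenever the two fields differ.
 */
static inline u32 arch_spin_current(u32 val)
{
	return val >> __ARCH_SPIN_CURRENT_SHIFT;	/* ticket now being served */
}

static inline u32 arch_spin_next(u32 val)
{
	return val & __ARCH_SPIN_NEXT_MASK;	/* next ticket to be handed out */
}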