summaryrefslogtreecommitdiff
path: root/arch/tile/include/asm/spinlock_32.h
diff options
context:
space:
mode:
authorChris Metcalf <cmetcalf@ezchip.com>2015-04-28 17:00:42 (GMT)
committerChris Metcalf <cmetcalf@ezchip.com>2015-04-29 02:43:16 (GMT)
commit627ae54854edfbf29d5997015c190de22eef497f (patch)
treeefa812a770bc06ce188f77262b5916e171ceaca1 /arch/tile/include/asm/spinlock_32.h
parent14c3dec2a875d898262be79c0f85e5f2b70a71b0 (diff)
downloadlinux-627ae54854edfbf29d5997015c190de22eef497f.tar.xz
tile: use READ_ONCE() in arch_spin_is_locked()
This avoids potential issues if callers were to loop on these routines without some kind of memory barrier. Currently there are no such users in-tree, but it seems better safe than sorry. Also, in the tilepro case we read "current" before "next", which gives us a slightly better guarantee that the lock was actually unlocked at least momentarily if we return claiming that it is not locked. None of the callers actually rely on this behavior, as far as I know, however. Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
Diffstat (limited to 'arch/tile/include/asm/spinlock_32.h')
-rw-r--r--arch/tile/include/asm/spinlock_32.h6
1 files changed, 5 insertions, 1 deletions
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index c0a77b3..b14b1ba 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -41,8 +41,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
* to claim the lock is held, since it will be momentarily
* if not already. There's no need to wait for a "valid"
* lock->next_ticket to become available.
+ * Use READ_ONCE() to ensure that calling this in a loop is OK.
*/
- return lock->next_ticket != lock->current_ticket;
+ int curr = READ_ONCE(lock->current_ticket);
+ int next = READ_ONCE(lock->next_ticket);
+
+ return next != curr;
}
void arch_spin_lock(arch_spinlock_t *lock);