Diffstat (limited to 'arch/arm/include/asm/spinlock.h')
 arch/arm/include/asm/spinlock.h | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355..da1af52 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,36 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif

+/*
+ * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 9998b\n" \
+ " " up "\n" \
+ " .popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV ALT_SMP("sev.w", "nop.w")
+#define WFE(cond) ALT_SMP("wfe" cond ".w", "nop.w")
+#else
+#define SEV ALT_SMP("sev", "nop")
+#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#endif
+
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb\n"
- "sev"
+ SEV
);
-#elif defined(CONFIG_CPU_32v6K)
+#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
- "sev"
+ SEV
: : "r" (0)
);
#endif
@@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-" wfene\n"
-#endif
+ WFE("ne")
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
@@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-" wfene\n"
-#endif
+ WFE("ne")
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
@@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-" wfemi\n"
-#endif
+ WFE("mi")
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)