path: root/include/asm-mips/system.h
author     Ralf Baechle <ralf@linux-mips.org>  2006-10-31 03:45:07 (GMT)
committer  Ralf Baechle <ralf@linux-mips.org>  2006-12-04 22:43:14 (GMT)
commit     0004a9dfeaa709a7f853487aba19932c9b1a87c8 (patch)
tree       e9f1f4b1ca897e57f46778cef283617ba83fc855 /include/asm-mips/system.h
parent     08f57f7ffe5819e537301b1f1109fa4fc670bfff (diff)
download   linux-fsl-qoriq-0004a9dfeaa709a7f853487aba19932c9b1a87c8.tar.xz
[MIPS] Cleanup memory barriers for weakly ordered systems.

Also, the R4000 / R4600 LL/SC instructions imply a sync, so no explicit
sync is needed.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include/asm-mips/system.h')
-rw-r--r--  include/asm-mips/system.h  156
1 file changed, 10 insertions(+), 146 deletions(-)
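
Note on the cleanup below: the barrier macros removed from this header move
to the new <asm/barrier.h>, whose contents are not part of this diff. For
orientation, here is a minimal sketch of the pre-patch scheme, reconstructed
only from the removed lines below (it is not the new header, and it ignores
the CONFIG_CPU_HAS_WB write-buffer special case):

/*
 * Sketch of the pre-patch MIPS barrier scheme, simplified from the
 * removed lines in this diff; not the contents of <asm/barrier.h>.
 */
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() __asm__ __volatile__("sync" : : : "memory")
#else
#define __sync() do { } while (0)      /* no sync instruction available */
#endif

#define mb()   __sync()
#define rmb()  __sync()
#define wmb()  __sync()

#ifdef CONFIG_SMP
#define smp_mb()   mb()                /* real barrier on SMP */
#else
#define smp_mb()   barrier()           /* compiler barrier only on UP */
#endif
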
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 3056fee..9428057 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
@@ -16,132 +16,12 @@
#include <linux/irqflags.h>
#include <asm/addrspace.h>
+#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
-/*
- * read_barrier_depends - Flush all pending reads that subsequent
- * reads depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads. This primitive is much lighter weight
- * than rmb() on most CPUs, and is never heavier weight than rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- ".set mips2\n\t" \
- "sync\n\t" \
- ".set pop" \
- : /* no output */ \
- : /* no input */ \
- : "memory")
-#else
-#define __sync() do { } while(0)
-#endif
-
-#define __fast_iob() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "lw $0,%0\n\t" \
- "nop\n\t" \
- ".set pop" \
- : /* no output */ \
- : "m" (*(int *)CKSEG1) \
- : "memory")
-
-#define fast_wmb() __sync()
-#define fast_rmb() __sync()
-#define fast_mb() __sync()
-#define fast_iob() \
- do { \
- __sync(); \
- __fast_iob(); \
- } while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() wbflush()
-#define iob() wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() fast_mb()
-#define iob() fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
/*
* switch_to(n) should switch tasks to task nr n, first
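
The first programlisting in the read_barrier_depends() comment deleted above,
rendered as a self-contained C sketch. The function names (cpu0_publish,
cpu1_consume) are illustrative, not kernel functions; initially a == 0,
b == 1, and p == &a, exactly as in the deleted text:

static int a, b = 1;
static int *p = &a;

/* runs on CPU 0 */
void cpu0_publish(void)
{
        b = 2;
        mb();                      /* order the store to b before the store to p */
        p = &b;
}

/* runs on CPU 1 */
int cpu1_consume(void)
{
        int *q = p;
        read_barrier_depends();    /* order the load of p before the load of *q */
        return *q;                 /* sees 2 whenever q == &b */
}
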
@@ -217,9 +97,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
" .set mips3 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
@@ -235,9 +112,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
" .set mips3 \n"
" sc %2, %1 \n"
" beqz %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
@@ -251,6 +125,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
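
The hunks above move the barrier out of the LL/SC retry loop: instead of a
sync on every iteration, a single smp_mb() follows a successful sc. A
simplified sketch of the resulting shape, assuming the non-R10000_LLSC_WAR
path and omitting the interrupt-disable fallback (this is a reconstruction,
not the exact kernel code):

/* Sketch of the post-patch __xchg_u32 shape: the barrier runs once,
 * after the store-conditional succeeds, not on every retry. */
static inline unsigned long xchg_u32_sketch(volatile int *m, unsigned int val)
{
        unsigned long retval, dummy;

        __asm__ __volatile__(
        "       .set    mips3                   \n"
        "1:     ll      %0, %3                  \n"     /* load-linked old value */
        "       move    %2, %z4                 \n"
        "       sc      %2, %1                  \n"     /* store-conditional */
        "       beqz    %2, 1b                  \n"     /* retry if sc failed */
        "       .set    mips0                   \n"
        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
        : "R" (*m), "Jr" (val)
        : "memory");

        smp_mb();       /* one full barrier after success, not per retry */
        return retval;
}
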
@@ -268,9 +144,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqzl %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
@@ -284,9 +157,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqz %2, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
@@ -300,6 +170,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
#else
@@ -345,9 +217,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
" .set mips3 \n"
" sc $1, %1 \n"
" beqzl $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
@@ -365,9 +234,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
" .set mips3 \n"
" sc $1, %1 \n"
" beqz $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
@@ -383,6 +249,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
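
The cmpxchg variants get the same treatment. What callers rely on is that a
cmpxchg acts as a full barrier on weakly ordered SMP; a hedged usage sketch
(counter_add_sketch is illustrative, not a kernel API):

/* Illustrative only: a lock-free counter update built on the
 * __cmpxchg_u32 pattern above. The trailing smp_mb() inside
 * __cmpxchg_u32 is what orders this update against later accesses
 * on weakly ordered MIPS SMP. */
static inline void counter_add_sketch(volatile int *counter, int delta)
{
        unsigned long old;

        do {
                old = *counter;         /* snapshot the current value */
        } while (__cmpxchg_u32(counter, old, old + delta) != old);
}
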
@@ -402,9 +270,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
" move $1, %z4 \n"
" scd $1, %1 \n"
" beqzl $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
@@ -420,9 +285,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
" move $1, %z4 \n"
" scd $1, %1 \n"
" beqz $1, 1b \n"
-#ifdef CONFIG_SMP
- " sync \n"
-#endif
"2: \n"
" .set pop \n"
: "=&r" (retval), "=R" (*m)
@@ -438,6 +300,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
local_irq_restore(flags); /* implies memory barrier */
}
+ smp_mb();
+
return retval;
}
#else