author    Catalin Marinas <catalin.marinas@arm.com>    2010-03-24 15:49:54 (GMT)
committer Russell King <rmk+kernel@arm.linux.org.uk>   2010-03-25 21:13:50 (GMT)
commit    e7c5650f6067f65f8e961394f376d4862808d0d2 (patch)
tree      ac1a298272cf4b452f3b36fef7878982d7eead9c /arch/arm/include
parent    23107c542068b2b94390aa333f6b330af64961e4 (diff)
download  linux-e7c5650f6067f65f8e961394f376d4862808d0d2.tar.xz
ARM: 5996/1: ARM: Change the mandatory barriers implementation (4/4)
The mandatory barriers (mb, rmb, wmb) are used even on uniprocessor
systems for things like ordering Normal Non-cacheable memory accesses
with DMA transfer (via Device memory writes). The current implementation
uses dmb() for mb() and friends but this is not sufficient. The DMB only
ensures the relative ordering of the observability of accesses by other
processors or devices acting as masters. In case of DMA transfers
started by writes to device memory, the relative ordering is not ensured
because accesses to slave ports of a device are not considered
observable by the DMB definition.

A DSB is required for the data to reach the main memory (even if mapped
as Normal Non-cacheable) before the device receives the notification to
begin the transfer. Furthermore, some L2 cache controllers (like L2x0 or
PL310) buffer stores to Normal Non-cacheable memory and this would need
to be drained with the outer_sync() function call.

The patch also allows platforms to define their own mandatory barriers
implementation by selecting CONFIG_ARCH_HAS_BARRIERS and providing a
mach/barriers.h file.

Note that the SMP barriers are unchanged (being DMBs as before) since
they are only guaranteed to work with Normal Cacheable memory.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
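To illustrate the ordering problem the commit message describes, here is a
minimal, hypothetical driver-side sketch (not part of the patch): the CPU
fills a DMA descriptor in coherent (Normal Non-cacheable) memory and then
writes a Device-memory register to start the transfer. With mb()/wmb()
implemented as a plain DMB, the descriptor writes are not guaranteed to have
reached main memory before the doorbell write arrives at the device's slave
port; with the dsb()+outer_sync() implementation below they are. The register
offset and descriptor layout are invented for the example.

	/* Hypothetical driver fragment; FOO_DMA_KICK offset and the
	 * descriptor layout are invented for illustration only. */
	#include <linux/io.h>
	#include <asm/system.h>

	static void foo_start_dma(void __iomem *regs, u32 *desc)
	{
		/* Stores to coherent (Normal Non-cacheable) memory ... */
		desc[0] = 0x80000000;	/* descriptor control word */
		desc[1] = 0x1000;	/* transfer length */

		/*
		 * ... must be visible to the device before the doorbell
		 * write below.  A DMB does not order them against a write
		 * to a device slave port, so mb()/wmb() now perform a DSB
		 * and drain the outer (L2x0/PL310) write buffer.
		 */
		wmb();

		writel(1, regs + 0x10);	/* FOO_DMA_KICK, invented offset */
	}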
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/system.h	16
1 files changed, 10 insertions, 6 deletions
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index ca88e6a..4ace45e 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -137,10 +139,12 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@ extern unsigned int user_debug;
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
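For platforms that select the new CONFIG_ARCH_HAS_BARRIERS option, the
mandatory barriers come from mach/barriers.h instead of the generic
definitions above. A minimal, hypothetical mach/barriers.h could look like
the sketch below; foo_interconnect_sync() is an invented platform helper and
the whole file is illustrative only, not part of this patch. Note that
asm/system.h already includes asm/outercache.h before pulling in
mach/barriers.h, so dsb() and outer_sync() are available here.

	/*
	 * Hypothetical mach/barriers.h sketch: a platform whose
	 * interconnect needs an extra drain after the outer cache sync
	 * provides its own mandatory barriers once it selects
	 * CONFIG_ARCH_HAS_BARRIERS.
	 */
	#ifndef __MACH_BARRIERS_H
	#define __MACH_BARRIERS_H

	extern void foo_interconnect_sync(void);	/* invented helper */

	#define mb()	do { dsb(); outer_sync(); foo_interconnect_sync(); } while (0)
	#define rmb()	dsb()
	#define wmb()	mb()

	#endif /* __MACH_BARRIERS_H */

The generic fallback (dsb() plus outer_sync()) remains the default for
ARMv7 and SMP kernels that do not select the option, while the SMP-only
barriers stay as plain DMBs, as the commit message notes.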