From bd91508b50ade5c73b3749bf4e5ede31d2da7ef8 Mon Sep 17 00:00:00 2001
From: Alexey Brodkin
Date: Wed, 8 Jun 2016 07:57:19 +0300
Subject: arc/cache: really do invalidate_dcache_all() even if IOC exists

invalidate_dcache_all() may be used in many different scenarios, and
most of them, importantly, have nothing to do with data DMAed to or
from peripherals, i.e. we are invalidating data used purely by the CPU
cores.

Given that the IOC engine only snoops data that goes through DMA, we
have to take care of CPU-only data ourselves. So remove the dependency
on IOC from invalidate_dcache_all() and always do a real invalidation.

Signed-off-by: Alexey Brodkin

diff --git a/arch/arc/lib/cache.c b/arch/arc/lib/cache.c
index d1fb661..a27499e 100644
--- a/arch/arc/lib/cache.c
+++ b/arch/arc/lib/cache.c
@@ -417,13 +417,10 @@ void flush_cache(unsigned long start, unsigned long size)
 
 void invalidate_dcache_all(void)
 {
-#ifdef CONFIG_ISA_ARCV2
-	if (!ioc_exists)
-#endif
-		__dc_entire_op(OP_INV);
+	__dc_entire_op(OP_INV);
 
 #ifdef CONFIG_ISA_ARCV2
-	if (slc_exists && !ioc_exists)
+	if (slc_exists)
 		__slc_entire_op(OP_INV);
 #endif
 }
--
cgit v0.10.2


From a4a43fcf9cca1ebd3d26f9a01b923b7393d69c54 Mon Sep 17 00:00:00 2001
From: Alexey Brodkin
Date: Wed, 8 Jun 2016 08:04:03 +0300
Subject: arc/cache: Flush & invalidate all caches right before enabling IOC

According to the ARC HS databook, caches must be flushed and disabled
prior to programming the IOC registers; otherwise ongoing coherent
memory operations may not observe the coherency protocol as expected.

But since in ARC HS v2.1 there is no way to disable the SLC (AKA L2
cache), we do our best by flushing and invalidating it instead.

Signed-off-by: Alexey Brodkin

diff --git a/arch/arc/lib/cache.c b/arch/arc/lib/cache.c
index a27499e..b6ec831 100644
--- a/arch/arc/lib/cache.c
+++ b/arch/arc/lib/cache.c
@@ -209,6 +209,9 @@ void cache_init(void)
 	read_decode_cache_bcr_arcv2();
 
 	if (ioc_exists) {
+		flush_dcache_all();
+		invalidate_dcache_all();
+
 		/* IO coherency base - 0x8z */
 		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, 0x80000);
 		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
--
cgit v0.10.2
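Taken together, the two patches above establish the following ordering in
cache_init() on ARCv2 parts with IOC. This is only a simplified sketch
assembled from the hunks shown here, not the complete function; the
remaining IOC register writes are elided.

/*
 * Simplified sketch of the resulting IOC bring-up order (ARCv2 with IOC);
 * everything after the first aperture write is elided.
 */
void cache_init(void)
{
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/*
		 * IOC only snoops DMA traffic, so whatever the CPU has
		 * cached so far must be written back and dropped by hand.
		 */
		flush_dcache_all();
		invalidate_dcache_all();

		/* Only then program the IO coherency aperture */
		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, 0x80000);
		/* ... remaining IOC setup ... */
	}
}
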
From c7d8db66ffd1c20b6a27445af892c28305e64e8a Mon Sep 17 00:00:00 2001
From: Alexey Brodkin
Date: Wed, 8 Jun 2016 08:19:33 +0300
Subject: board: axs10x: Flush entire cache after programming reset vector

Now that we have support for IOC (the IO-Coherency block), ranged cache
operations become dummy stubs once IOC is found and enabled in the
core. That makes flush_dcache_range() useless for our purposes here.

And since we do need to flush the modified reset vector at least to the
L2 cache (AKA SLC), so that other cores will see it via their L1
instruction caches, use the always functional flush_dcache_all() here.

Signed-off-by: Alexey Brodkin
Cc: Marek Vasut

diff --git a/board/synopsys/axs101/axs101.c b/board/synopsys/axs101/axs101.c
index 84ee2bf..a5e774b 100644
--- a/board/synopsys/axs101/axs101.c
+++ b/board/synopsys/axs101/axs101.c
@@ -54,7 +54,7 @@ void smp_set_core_boot_addr(unsigned long addr, int corenr)
 	writel(addr, (void __iomem *)RESET_VECTOR_ADDR);
 
 	/* Make sure other cores see written value in memory */
-	flush_dcache_range(RESET_VECTOR_ADDR, RESET_VECTOR_ADDR + sizeof(int));
+	flush_dcache_all();
 }
 
 void smp_kick_all_cpus(void)
--
cgit v0.10.2


From 5bea2becf3b6897315fa01d8318df75526855745 Mon Sep 17 00:00:00 2001
From: Alexey Brodkin
Date: Wed, 8 Jun 2016 08:24:54 +0300
Subject: arc: Update data accessors with use of memory barriers

Memory barriers are required for both the compiler and real hardware to
properly serialize accesses to critical data. For example, if the CPU
or the data bus it uses may reorder data accesses, the absence of
memory barriers can easily lead to very subtle and hard-to-debug data
corruption.

This implementation was heavily borrowed from the current Linux kernel.

Signed-off-by: Alexey Brodkin

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index b6f7724..42e7f22 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -10,6 +10,46 @@
 #include
 #include
 
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * Explicit barrier provided by DMB instruction
+ *  - Operand supports fine grained load/store/load+store semantics
+ *  - Ensures that selected memory operation issued before it will complete
+ *    before any subsequent memory operation of same type
+ *  - DMB guarantees SMP as well as local barrier semantics
+ *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *    UP: barrier(), SMP: smp_*mb == *mb)
+ *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not
+ *    needed in the general case. Plus it only provides full barrier.
+ */
+
+#define mb()	asm volatile("dmb 3\n" : : : "memory")
+#define rmb()	asm volatile("dmb 1\n" : : : "memory")
+#define wmb()	asm volatile("dmb 2\n" : : : "memory")
+
+#else
+
+/*
+ * ARCompact based cores (ARC700) only have SYNC instruction which is super
+ * heavy weight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
+
+#define mb()	asm volatile("sync\n" : : : "memory")
+#endif
+
+#ifdef CONFIG_ISA_ARCV2
+#define __iormb()	rmb()
+#define __iowmb()	wmb()
+#else
+#define __iormb()	do { } while (0)
+#define __iowmb()	do { } while (0)
+#endif
+
 /*
  * Given a physical address and a length, return a virtual address
  * that can be used to access the memory range with the caching
@@ -72,18 +112,6 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 	return w;
 }
 
-#define readb __raw_readb
-
-static inline u16 readw(const volatile void __iomem *addr)
-{
-	return __le16_to_cpu(__raw_readw(addr));
-}
-
-static inline u32 readl(const volatile void __iomem *addr)
-{
-	return __le32_to_cpu(__raw_readl(addr));
-}
-
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
 	__asm__ __volatile__("stb%U1	%0, %1\n"
@@ -108,10 +136,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 		     : "memory");
 }
 
-#define writeb __raw_writeb
-#define writew(b, addr) __raw_writew(__cpu_to_le16(b), addr)
-#define writel(b, addr) __raw_writel(__cpu_to_le32(b), addr)
-
 static inline int __raw_readsb(unsigned int addr, void *data, int bytelen)
 {
 	__asm__ __volatile__ ("1:	ld.di	r8, [r0]\n"
@@ -184,6 +208,45 @@ static inline int __raw_writesl(unsigned int addr, void *data, int longlen)
 	return longlen;
 }
 
+/*
+ * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+ * Based on ARM model for the typical use case
+ *
+ *	<ST [DMA buffer]>
+ *	<writel MMIO "go" reg>
+ *  or:
+ *	<readl MMIO "status" reg>
+ *	<LD [DMA buffer]>
+ *
+ * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
+ */
+#define readb(c)	({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)	({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)	({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)	({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)	({ __iowmb(); writel_relaxed(v,c); })
+
+/*
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
+ */
+#define readb_relaxed(c)	__raw_readb(c)
+#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
+					__raw_readw(c)); __r; })
+#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl(c)); __r; })
+
+#define writeb_relaxed(v,c)	__raw_writeb(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
+
 #define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
 #define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))
--
cgit v0.10.2
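With these accessors a peripheral driver normally needs no open-coded
barriers: readb/w/l() and writeb/w/l() are fully ordered, while the
_relaxed() variants are left for code that handles ordering itself. The
fragment below is purely illustrative, assuming a made-up device with
invented register offsets and bit values, and only shows how the two
flavours are meant to be combined (mirroring the "DMA buffer" / "go" /
"status" pattern referenced in the comment above).

/* Hypothetical device: register offsets and bit values are invented here */
#define DEV_DMA_ADDR	0x00	/* DMA buffer address register */
#define DEV_CTRL	0x04	/* control register */
#define DEV_CTRL_GO	0x01	/* "start DMA" bit */
#define DEV_STATUS	0x08	/* status register */
#define DEV_STATUS_DONE	0x01	/* "DMA finished" bit */

static void dev_start_dma(void __iomem *regs, u32 buf_phys)
{
	/*
	 * Programming the buffer address needs no ordering of its own,
	 * so the relaxed accessor is sufficient here.
	 */
	writel_relaxed(buf_phys, regs + DEV_DMA_ADDR);

	/*
	 * writel() is __iowmb() + writel_relaxed(): the buffer contents and
	 * the address programmed above are guaranteed to reach the device
	 * before the "go" bit does.
	 */
	writel(DEV_CTRL_GO, regs + DEV_CTRL);
}

static int dev_dma_done(void __iomem *regs)
{
	/*
	 * readl() is readl_relaxed() + __iormb(): once this load observes
	 * DONE, later loads from the DMA buffer are ordered after it and
	 * therefore see the data the device wrote.
	 */
	return readl(regs + DEV_STATUS) & DEV_STATUS_DONE;
}
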
From fc1e8fbbb2ea73139c2eadf2f174d6c3fc4ee03f Mon Sep 17 00:00:00 2001
From: Alexey Brodkin
Date: Fri, 10 Jun 2016 18:19:25 +0300
Subject: axs103: Bump CPU frequency from 50MHz to 100MHz

In the upcoming axs103 v1.1 release the CPU will run at 100MHz, which
we support with this change.

Signed-off-by: Alexey Brodkin

diff --git a/configs/axs103_defconfig b/configs/axs103_defconfig
index 96a3de6..c8474de 100644
--- a/configs/axs103_defconfig
+++ b/configs/axs103_defconfig
@@ -1,7 +1,7 @@
 CONFIG_ARC=y
 CONFIG_ISA_ARCV2=y
 CONFIG_DM_SERIAL=y
-CONFIG_SYS_CLK_FREQ=50000000
+CONFIG_SYS_CLK_FREQ=100000000
 CONFIG_SYS_TEXT_BASE=0x81000000
 CONFIG_DEFAULT_DEVICE_TREE="axs10x"
 CONFIG_BOOTDELAY=3
--
cgit v0.10.2