From 87f9260bf1fb7ec4c3f88c6bb353df17ca1e1c19 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 8 Feb 2014 21:18:24 +0000
Subject: ARM: dts: fix spdif pinmux configuration

The spdif pinmux configuration must be connected to the spdif device to
take effect, not the spdif-transmitter.

Signed-off-by: Russell King

diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index fd8fc7c..5bfae54 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -52,12 +52,6 @@
 		};
 	};
 
-	codec: spdif-transmitter {
-		compatible = "linux,spdif-dit";
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_hummingboard_spdif>;
-	};
-
 	sound-spdif {
 		compatible = "fsl,imx-audio-spdif";
 		model = "imx-spdif";
@@ -111,7 +105,7 @@
 	};
 
 	pinctrl_hummingboard_spdif: hummingboard-spdif {
-		fsl,pins = ;
+		fsl,pins = ;
 	};
 
 	pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@@ -142,6 +136,8 @@
 	};
 
 &spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hummingboard_spdif>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 64daa3b..c2a2488 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -46,12 +46,6 @@
 		};
 	};
 
-	codec: spdif-transmitter {
-		compatible = "linux,spdif-dit";
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_cubox_i_spdif>;
-	};
-
 	sound-spdif {
 		compatible = "fsl,imx-audio-spdif";
 		model = "imx-spdif";
@@ -89,7 +83,7 @@
 	};
 
 	pinctrl_cubox_i_spdif: cubox-i-spdif {
-		fsl,pins = ;
+		fsl,pins = ;
 	};
 
 	pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@@ -121,6 +115,8 @@
 	};
 
 &spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cubox_i_spdif>;
 	status = "okay";
 };
 
--
cgit v0.10.2


From 4d9c5b89cf3605bbc39c6e274351ff25f0d83e6a Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sun, 2 Feb 2014 22:21:31 +0100
Subject: ARM: 7950/1: mm: Fix stage-2 device memory attributes

The stage-2 memory attributes are distinct from the Hyp memory
attributes and the Stage-1 memory attributes. We were using the
stage-1 memory attributes for stage-2 mappings causing device mappings
to be mapped as normal memory. Add the S2 equivalent defines for
memory attributes and fix the comments explaining the defines while at
it.

Add a prot_pte_s2 field to the mem_type struct and fill out the field
for device mappings accordingly.

Cc: [3.9+]
Acked-by: Marc Zyngier
Acked-by: Catalin Marinas
Signed-off-by: Christoffer Dall
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7..85c60ad 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
 /*
  * 2nd stage PTE definitions for LPAE.
  */
-#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x5) << 2)	/* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2)	/* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2)	/* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)		/* HAP[1] */
-#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)		/* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2)	/* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2)	/* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2)	/* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2)	/* device */
+#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)
 
-#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)		/* HAP[2:1] */
+#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)		/* HAP[1] */
+#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)		/* HAP[2:1] */
+
+#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)		/* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d..7ea641b 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 
 struct mem_type {
 	pteval_t prot_pte;
+	pteval_t prot_pte_s2;
 	pmdval_t prot_l1;
 	pmdval_t prot_sect;
 	unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f08c13..a623cb3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
+		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
+				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
@@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 	s2_pgprot = cp->pte_s2;
-	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
 	/*
 	 * ARMv6 and above have extended page tables.
--
cgit v0.10.2


From ca4744084772e1fee999391bee0fcdca5d91a757 Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Thu, 6 Feb 2014 19:50:35 +0100
Subject: ARM: 7952/1: mm: Fix the memblock allocation for LPAE machines

Commit ad6492b8 added the much-needed memblock_virt_alloc_low(), and the
follow-up commit 07bacb3 ("memblock, bootmem: restore goal for
alloc_low") fixed the issue with the low memory limit, thanks to
Yinghai. But even after all these fixes, there is still one case where
the limit check done with ARCH_LOW_ADDRESS_LIMIT for low memory fails.
Russell pointed out the issue with 32-bit LPAE machines in the thread
below:
https://lkml.org/lkml/2014/1/28/364

On some LPAE machines, where the memory start address is beyond 4GB,
the low memory marker in memblock will be set to the default
ARCH_LOW_ADDRESS_LIMIT, which is wrong. We could fix this by letting
architectures set ARCH_LOW_ADDRESS_LIMIT through another export similar
to memblock_set_current_limit(), but I am not sure whether it's worth
the trouble. Tell me if you think otherwise.
Rather, I am just fixing that one broken case by using
memblock_virt_alloc() in the setup code; since memblock.current_limit
is updated appropriately there, this works on all 32-bit ARM machines.

Cc: Yinghai Lu
Cc: Strashko, Grygorii
Cc: Andrew Morton
Signed-off-by: Santosh Shilimkar
Signed-off-by: Russell King

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df976..1e8b030 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 	kernel_data.end     = virt_to_phys(_end - 1);
 
 	for_each_memblock(memory, region) {
-		res = memblock_virt_alloc_low(sizeof(*res), 0);
+		res = memblock_virt_alloc(sizeof(*res), 0);
 		res->name  = "System RAM";
 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
--
cgit v0.10.2


From bae0ca2bc550d1ec6a118fb8f2696f18c4da3d8e Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 7 Feb 2014 19:12:20 +0100
Subject: ARM: 7953/1: mm: ensure TLB invalidation is complete before enabling MMU

During __v{6,7}_setup, we invalidate the TLBs since we are about to
enable the MMU on return to head.S. Unfortunately, without a subsequent
dsb instruction, the invalidation is not guaranteed to have completed by
the time we write to the sctlr, potentially exposing us to junk/stale
translations cached in the TLB.

This patch reworks the init functions so that the dsb used to ensure
completion of cache/predictor maintenance is also used to ensure
completion of the TLB invalidation.

Cc:
Reported-by: Albin Tonnerre
Signed-off-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 45dc29f..32b3558 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -208,7 +208,6 @@ __v6_setup:
 	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
@@ -218,6 +217,8 @@ __v6_setup:
 	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
 	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer and
+						@ complete invalidations
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
 ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index bd17819..74f6033 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -351,7 +351,6 @@ __v7_setup:
 
 4:	mov	r10, #0
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
-	dsb
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	v7_ttb_setup r10, r4, r8, r5		@ TTBCR, TTBRx setup
@@ -360,6 +359,7 @@ __v7_setup:
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
+	dsb					@ Complete invalidations
 #ifndef CONFIG_ARM_THUMBEE
 	mrc	p15, 0, r0, c0, c1, 0		@ read ID_PFR0 for ThumbEE
 	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
--
cgit v0.10.2


From 7c8746a9eb287642deaad0e7c2cdf482dce5e4be Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 7 Feb 2014 19:12:32 +0100
Subject: ARM: 7955/1: spinlock: ensure we have a compiler barrier before sev

When unlocking a spinlock, we require the following, strictly ordered
sequence of events:

	<barrier>	/* dmb */
	<unlock>
	<barrier>	/* dsb */
	<sev>

Whilst the code does indeed reflect this in terms of the architecture,
the final <barrier> + <sev>
have been contracted into a single inline asm without a "memory"
clobber, therefore the compiler is at liberty to reorder the unlock to
the end of the above sequence. In such a case, a waiting CPU may be
woken up before the lock has been unlocked, leading to extremely poor
performance.

This patch reworks the dsb_sev() function to make use of the dsb()
macro and ensure ordering against the unlock.

Cc:
Reported-by: Mark Rutland
Signed-off-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c607..ac4bfae 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
 
 static inline void dsb_sev(void)
 {
-#if __LINUX_ARM_ARCH__ >= 7
-	__asm__ __volatile__ (
-		"dsb ishst\n"
-		SEV
-	);
-#else
-	__asm__ __volatile__ (
-		"mcr	p15, 0, %0, c7, c10, 4\n"
-		SEV
-		: : "r" (0)
-	);
-#endif
+
+	dsb(ishst);
+	__asm__(SEV);
 }
 
 /*
--
cgit v0.10.2


From e83b366487b5582274374f8226e489cb214ae5a6 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 11 Feb 2014 17:11:04 +0000
Subject: Fix uses of dma_max_pfn() when converting to a limiting address

We must use a 64-bit type for this, otherwise the overflowed bits get
lost, and that can result in a lower limit being set than intended.

Fixes: 8e0cb8a1f6ac ("ARM: 7797/1: mmc: Use dma_max_pfn(dev) helper for bounce_limit calculations")
Fixes: 7d35496dd982 ("ARM: 7796/1: scsi: Use dma_max_pfn(dev) helper for bounce_limit calculations")
Tested-Acked-by: Santosh Shilimkar
Reviewed-by: Ulf Hansson
Signed-off-by: Russell King

diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc5..3e049c1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
 	mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d..62ec84b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 
 	host_dev = scsi_get_device(shost);
 	if (host_dev && host_dev->dma_mask)
-		bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
 
 	return bounce_limit;
 }
--
cgit v0.10.2


From 39544ac9df20f73e49fc6b9ac19ff533388c82c0 Mon Sep 17 00:00:00 2001
From: Vinayak Kale
Date: Wed, 12 Feb 2014 07:30:01 +0100
Subject: ARM: 7957/1: add DSB after icache flush in __flush_icache_all()

Add DSB after icache flush to complete the cache maintenance operation.

Signed-off-by: Vinayak Kale
Acked-by: Catalin Marinas
Cc:
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe..8b8b616 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 static inline void __flush_icache_all(void)
 {
 	__flush_icache_preferred();
+	dsb();
 }
 
 /*
--
cgit v0.10.2
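
The two (u64) casts in the dma_max_pfn() patch are easy to gloss over,
so here is why they matter. The following is a minimal, standalone
user-space C sketch (not kernel code) of the truncation the patch
fixes; the 40-bit address space, the PAGE_SHIFT value and the uint32_t
stand-in for the 32-bit unsigned long returned by dma_max_pfn() on
32-bit ARM are all illustrative assumptions.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumed: 4K pages */

	int main(void)
	{
		/*
		 * Highest PFN of a 40-bit physical address space:
		 * 2^40 bytes / 2^12 bytes per page = 2^28 pages.
		 */
		uint32_t max_pfn = UINT32_C(1) << 28;

		/* 32-bit shift: the high bits fall off and the limit becomes 0. */
		uint32_t truncated = max_pfn << PAGE_SHIFT;

		/* Widen to 64 bits before shifting, as the patch does. */
		uint64_t correct = (uint64_t)max_pfn << PAGE_SHIFT;

		printf("truncated: %#x\n", truncated);	/* prints 0 */
		printf("correct:   %#llx\n",		/* prints 0x10000000000 */
		       (unsigned long long)correct);
		return 0;
	}

Built with any C compiler, this prints 0 for the 32-bit computation and
0x10000000000 for the widened one, which is exactly the difference
between a bounce limit of "nothing" and the intended 1TB boundary.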
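Likewise, the dsb_sev() rework above is about compiler ordering, not
just CPU ordering. The sketch below is a hypothetical, standalone
ARMv7 rendering of the two shapes (not the kernel's actual spinlock.h,
and the function names are invented). The key point is that the
kernel's dsb() macro expands to an asm with a "memory" clobber, which
forbids the compiler from sinking the unlock store past the barrier.

	/* Pre-patch shape: a single asm with no "memory" clobber. Nothing
	 * ties the store to *lock to the asm, so the compiler may legally
	 * sink the store below the dsb+sev, waking waiters before the lock
	 * is actually free. */
	static inline void unlock_buggy(unsigned int *lock)
	{
		*lock = 0;			/* the unlock store */
		__asm__ __volatile__("dsb ishst\n\tsev");
	}

	/* Post-patch shape: the barrier carries a "memory" clobber (as the
	 * kernel's dsb() macro does), acting as a compiler barrier, so the
	 * unlock store must be emitted before the dsb; sev then follows
	 * the completed barrier. */
	static inline void unlock_fixed(unsigned int *lock)
	{
		*lock = 0;			/* the unlock store */
		__asm__ __volatile__("dsb ishst" : : : "memory");
		__asm__ __volatile__("sev");
	}

This compiles only for 32-bit ARMv7 targets; on other architectures the
same pattern applies with their respective barrier and wakeup
instructions.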