From 3cea71bc6b470372ae407881b87128aadf0afec0 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 2 Dec 2013 16:11:00 +0000
Subject: arm64: ensure completion of TLB invalidation

Currently there is no dsb between the tlbi in __cpu_setup and the write
to SCTLR_EL1 which enables the MMU in __turn_mmu_on. This means that the
TLB invalidation is not guaranteed to have completed at the point
address translation is enabled, leading to a number of possible issues
including incorrect translations and TLB conflict faults.

This patch moves the tlbi in __cpu_setup above an existing dsb used to
synchronise I-cache invalidation, ensuring that the TLBs have been
invalidated at the point the MMU is enabled.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 421b99f..0f7fec5 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -111,12 +111,12 @@ ENTRY(__cpu_setup)
 	bl	__flush_dcache_all
 	mov	lr, x28
 	ic	iallu				// I+BTB cache invalidate
+	tlbi	vmalle1is			// invalidate I + D TLBs
 	dsb	sy
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	msr	mdscr_el1, xzr			// Reset mdscr_el1
-	tlbi	vmalle1is			// invalidate I + D TLBs
 	/*
 	 * Memory region attributes for LPAE:
 	 *
--
cgit v0.10.2

From 62aceb8ff4b3f442575eb7e23629da36020dca77 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Fri, 22 Nov 2013 21:07:31 +0000
Subject: arm64: make default NR_CPUS 8

Rather than continue to add per-platform defaults, make the default a
likely common core count. 8 is also the default for x86.

Signed-off-by: Rob Herring
Cc: Will Deacon
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 88c8b6c1..6d4dd22 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -159,8 +159,7 @@ config NR_CPUS
 	range 2 32
 	depends on SMP
 	# These have to remain sorted largest to smallest
-	default "8" if ARCH_XGENE
-	default "4"
+	default "8"
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
--
cgit v0.10.2

From 85cc00eaa81dfa0f5bf8076c48f3ee2c2c4a77ba Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi
Date: Mon, 18 Nov 2013 18:56:42 +0000
Subject: arm64: kernel: add code to set cpu boot mode to secondary_entry shim

The refactoring of el2_setup split the code setting up EL2 and detecting
the CPU boot mode into separate chunks. This allows the code that sets
up EL2 to run in an endian-independent way, i.e. before the endianness
is set up in the respective sctlr registers.

This patch brings secondary_entry up to date so that CPUs entering the
kernel through this code path set up EL2 and the cpu boot mode properly.

Signed-off-by: Lorenzo Pieralisi
Acked-by: Mark Rutland
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7009387..c68cca5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen)
 	 * be used where CPUs are brought online dynamically by the kernel.
 	 */
ENTRY(secondary_entry)
-	bl	__calc_phys_offset		// x2=phys offset
 	bl	el2_setup			// Drop to EL1
+	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+	bl	set_cpu_boot_mode_flag
 	b	secondary_startup
ENDPROC(secondary_entry)
 
--
cgit v0.10.2

From 2f7dc6027522499582a520807cb9ffda589de47e Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Thu, 5 Dec 2013 16:56:50 +0000
Subject: arm64: Fix memory shareability attribute for ioremap_wc/cache

Write-combine and cacheable mappings use Normal memory on arm64.
On SMP systems, the pte needs the shareability bit, which is set in
pgprot_default. Use this for defining the PROT_DEFAULT used by
ioremap_wc and ioremap_cache (Device memory is shareable by default and
does not need additional attributes).

Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4cc813e..5727697 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
+#define PROT_DEFAULT		(pgprot_default | PTE_DIRTY)
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
--
cgit v0.10.2

From db4ed53cfe9f5a00355891a631d47dfa3fd4541f Mon Sep 17 00:00:00 2001
From: Steve Capper
Date: Thu, 5 Dec 2013 12:04:51 +0000
Subject: arm64: mm: Fix PMD_SECT_PROT_NONE definition

Modify the value of PMD_SECT_PROT_NONE to match that of PTE_PROT_NONE.
This should have been in commit 3676f9ef5481 (Move PTE_PROT_NONE higher
up).

Signed-off-by: Steve Capper
Cc: # 3.11+: 3676f9ef5481: arm64: Move PTE_PROT_NONE higher up
Signed-off-by: Catalin Marinas

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 755f861..b1d2e26 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -43,7 +43,7 @@
  * Section
  */
 #define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 58)
 #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
 #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
--
cgit v0.10.2