From 20c5ea4fc131dc45c2639653b5b7aeeb2d4d0d1e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 4 Mar 2016 10:05:39 +0100 Subject: ARM: reintroduce ioremap_cached() for creating cached I/O mappings The original ARM-only ioremap flavor 'ioremap_cached' has been renamed to 'ioremap_cache' to align with other architectures, and subsequently abused in generic code to map things like firmware tables in memory. For that reason, there is currently an effort underway to deprecate ioremap_cache, whose semantics are poorly defined, and which is typed with an __iomem annotation that is inappropriate for mappings of ordinary memory. However, original users of ioremap_cached() used it in a context where the I/O connotation is appropriate, and replacing those instances with memremap() does not make sense. So let's revive ioremap_cached(), so that we can change back those original users before we drop ioremap_cache entirely in favor of memremap. Cc: Russell King Acked-by: Dan Williams Signed-off-by: Ard Biesheuvel diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 4859820..fb94de2 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -395,6 +395,12 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size); void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); #define ioremap_cache ioremap_cache +/* + * Do not use ioremap_cached in new code. Provided for the benefit of + * the pxa2xx-flash MTD driver only. + */ +void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size); + void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); #define ioremap_wc ioremap_wc #define ioremap_wt ioremap_wc diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 66a978d..d5350f6 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -380,11 +380,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size) EXPORT_SYMBOL(ioremap); void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) + __alias(ioremap_cached); + +void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size) { return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_cache); +EXPORT_SYMBOL(ioremap_cached); void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) { diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 1dd1093..f24967e 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -367,11 +367,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size) EXPORT_SYMBOL(ioremap); void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) + __alias(ioremap_cached); + +void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size) { return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_cache); +EXPORT_SYMBOL(ioremap_cached); void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) { -- cgit v0.10.2 From 7769aea2a27db8f12859dcecce37a3da44ab588b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 25 Feb 2016 08:57:52 +0100 Subject: mtd: pxa2xx-flash: switch back from memremap to ioremap_cached This reverts commit 06968a54790d ("mtd: pxa2xx-flash: switch from ioremap_cache to memremap"), since NOR with memory semantics in array mode and RAM are not necessarily the same thing, and architectures may implement ioremap_cached() and memremap() with different memory attributes. 
For this reason, ioremap_cached() has been brought back from the dead on the ARM side, so switch this driver back to using it instead of memremap(). Cc: David Woodhouse Acked-by: Brian Norris Acked-by: Dan Williams Signed-off-by: Ard Biesheuvel diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 7497090..2cde28e 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -71,8 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) info->map.name); return -ENOMEM; } - info->map.cached = memremap(info->map.phys, info->map.size, - MEMREMAP_WB); + info->map.cached = + ioremap_cached(info->map.phys, info->map.size); if (!info->map.cached) printk(KERN_WARNING "Failed to ioremap cached %s\n", info->map.name); @@ -111,7 +111,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev) map_destroy(info->mtd); iounmap(info->map.virt); if (info->map.cached) - memunmap(info->map.cached); + iounmap(info->map.cached); kfree(info); return 0; } -- cgit v0.10.2 From c269cba35b061181bc23c470809c00e8f71e535a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 22 Feb 2016 15:02:07 +0100 Subject: memremap: add arch specific hook for MEMREMAP_WB mappings Currently, the memremap code serves MEMREMAP_WB mappings directly from the kernel direct mapping, unless the region is in high memory, in which case it falls back to using ioremap_cache(). However, the semantics of ioremap_cache() are not unambiguously defined, and on ARM, it will actually result in a mapping type that differs from the attributes used for the linear mapping, and for this reason, the ioremap_cache() call fails if the region is part of the memory managed by the kernel. So instead, implement an optional hook 'arch_memremap_wb' whose default implementation calls ioremap_cache() as before, but which can be overridden by the architecture to do what is appropriate for it. Acked-by: Dan Williams Signed-off-by: Ard Biesheuvel diff --git a/kernel/memremap.c b/kernel/memremap.c index a6d3823..0175321 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -27,6 +27,13 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size) } #endif +#ifndef arch_memremap_wb +static void *arch_memremap_wb(resource_size_t offset, unsigned long size) +{ + return (__force void *)ioremap_cache(offset, size); +} +#endif + static void *try_ram_remap(resource_size_t offset, size_t size) { unsigned long pfn = PHYS_PFN(offset); @@ -34,7 +41,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size) /* In the simple case just return the existing linear address */ if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn))) return __va(offset); - return NULL; /* fallback to ioremap_cache */ + return NULL; /* fallback to arch_memremap_wb */ } /** @@ -90,7 +97,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags) if (is_ram == REGION_INTERSECTS) addr = try_ram_remap(offset, size); if (!addr) - addr = ioremap_cache(offset, size); + addr = arch_memremap_wb(offset, size); } /* -- cgit v0.10.2 From 9ab9e4fce45379cb6a7dbf87cf8f8e6ba01853c2 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 22 Feb 2016 15:02:08 +0100 Subject: ARM: memremap: implement arch_memremap_wb() The generic memremap() falls back to using ioremap_cache() to create MEMREMAP_WB mappings if the requested region is not already covered by the linear mapping, unless the architecture provides an implementation of arch_memremap_wb(). 
Since ioremap_cache() is not appropriate on ARM to map memory with the same attributes used for the linear mapping, implement arch_memremap_wb() which does exactly that. Also, relax the WARN() check to allow MT_MEMORY_RW mappings of pfn_valid() pages. Cc: Russell King Acked-by: Dan Williams Signed-off-by: Ard Biesheuvel diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index fb94de2..781ef5f 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -392,6 +392,9 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size); #define ioremap ioremap #define ioremap_nocache ioremap +/* + * Do not use ioremap_cache for mapping memory. Use memremap instead. + */ void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); #define ioremap_cache ioremap_cache @@ -408,6 +411,9 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); void iounmap(volatile void __iomem *iomem_cookie); #define iounmap iounmap +void *arch_memremap_wb(phys_addr_t phys_addr, size_t size); +#define arch_memremap_wb arch_memremap_wb + /* * io{read,write}{16,32}be() macros */ diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index d5350f6..ff0eed2 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, } /* - * Don't allow RAM to be mapped - this causes problems with ARMv6+ + * Don't allow RAM to be mapped with mismatched attributes - this + * causes problems with ARMv6+ */ - if (WARN_ON(pfn_valid(pfn))) + if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW)) return NULL; area = get_vm_area_caller(size, VM_IOREMAP, caller); @@ -418,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) __builtin_return_address(0)); } +void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) +{ + return (__force void *)arch_ioremap_caller(phys_addr, size, + MT_MEMORY_RW, + __builtin_return_address(0)); +} + void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index f24967e..ca6d8e8 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -384,6 +384,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) } EXPORT_SYMBOL(ioremap_wc); +void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) +{ + return (void *)phys_addr; +} + void __iounmap(volatile void __iomem *addr) { } -- cgit v0.10.2 From 903589ca7165c41d149d902a06006b0f2b975231 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 1 Apr 2016 10:47:22 +0100 Subject: ARM: 8554/1: kernel: pci: remove pci=firmware command line parameter handling According to kernel documentation, the pci=firmware command line parameter is only meant to be used on IXP2000 ARM platforms to prevent the kernel from assigning PCI resources configured by the bootloader. Since the IXP2000 ARM platforms support has been removed from the kernel in commit: commit c65f2abf54a6 ("ARM: remove ixp23xx and ixp2000 platforms") its platforms specific kernel parameters should be removed too from the kernel documentation along with the kernel code currently handling them in that they have just become obsolete. This patch removes the pci=firmware command line parameter handling from ARM code and the related kernel parameters documentation section. 
Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas Acked-by: Lennert Buytenhek Cc: Arnd Bergmann Cc: Lennert Buytenhek Cc: Jonathan Corbet Cc: Bjorn Helgaas Cc: Rob Herring Signed-off-by: Russell King diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index ecc74fa..a036762 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2921,11 +2921,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. for broken drivers that don't call it. skip_isa_align [X86] do not align io start addr, so can handle more pci cards - firmware [ARM] Do not re-enumerate the bus but instead - just use the configuration from the - bootloader. This is currently used on - IXP2000 systems where the bus has to be - configured a certain way for adjunct CPUs. noearly [X86] Don't do any early type 1 scanning. This might help on some broken boards which machine check when some devices' config space diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 066f7f9..05e61a2 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -550,9 +550,6 @@ char * __init pcibios_setup(char *str) if (!strcmp(str, "debug")) { debug_pci = 1; return NULL; - } else if (!strcmp(str, "firmware")) { - pci_add_flags(PCI_PROBE_ONLY); - return NULL; } return str; } -- cgit v0.10.2 From b67dd2e9bdc0f79a3389e57c51189ed2c2127c00 Mon Sep 17 00:00:00 2001 From: Alexandre Courbot Date: Mon, 7 Mar 2016 03:35:56 +0100 Subject: ARM: 8548/1: dma-mapping: remove arm_dma_set_mask() arm_dma_set_mask() implements exactly the same behavior as the fallback that dma_set_mask() takes if the set_dma_mask op is not set. Remove it and use that fallback instead like what is already done for dma_get_mask(). 
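For context, the generic fallback that dma_set_mask() takes when a bus has no .set_dma_mask op looked roughly like the sketch below at the time of this series. This is a paraphrase for illustration, not the exact include/linux/dma-mapping.h source; the function name is changed to make that clear.

```c
/* Paraphrased sketch of the generic dma_set_mask() behaviour. */
static inline int dma_set_mask_sketch(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* A bus-specific op, when present, takes precedence. */
	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	/* Otherwise the generic path does what arm_dma_set_mask() did. */
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
```

Since the removed arm_dma_set_mask() body was identical to the generic path above, dropping the op changes nothing observable for ARM callers.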
Signed-off-by: Alexandre Courbot Signed-off-by: Russell King diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 6ad1ced..0f2034d 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -162,8 +162,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) { } -extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); - /** * arm_dma_alloc - allocate consistent memory for DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index deac58d..bad0c6a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -190,7 +190,6 @@ struct dma_map_ops arm_dma_ops = { .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, - .set_dma_mask = arm_dma_set_mask, }; EXPORT_SYMBOL(arm_dma_ops); @@ -209,7 +208,6 @@ struct dma_map_ops arm_coherent_dma_ops = { .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, - .set_dma_mask = arm_dma_set_mask, }; EXPORT_SYMBOL(arm_coherent_dma_ops); @@ -1142,16 +1140,6 @@ int dma_supported(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_supported); -int arm_dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} - #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) @@ -2005,8 +1993,6 @@ struct dma_map_ops iommu_ops = { .unmap_sg = arm_iommu_unmap_sg, .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, .sync_sg_for_device = arm_iommu_sync_sg_for_device, - - .set_dma_mask = arm_dma_set_mask, }; struct dma_map_ops iommu_coherent_ops = { @@ -2020,8 +2006,6 @@ struct dma_map_ops iommu_coherent_ops = { .map_sg = arm_coherent_iommu_map_sg, .unmap_sg = arm_coherent_iommu_unmap_sg, - - .set_dma_mask = arm_dma_set_mask, }; /** -- cgit v0.10.2 From b9b74be163a247fcbb3ef18086cc27123539131c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 1 Apr 2016 14:33:48 +0100 Subject: ARM: 8555/1: kallsyms: ignore ARM mode switching veneers On ARM, the linker may emit veneers to deal with relative branch instructions that appear too far away from their targets. Since the second kallsyms pass results in an increase of the kernel size, it may result in additional veneers to be emitted, potentially affecting the output of kallsyms itself if these symbols are visible to it, and for that reason, symbols whose names end in '_veneer' are ignored explicitly. However, when building Thumb2 kernels, such veneers are named differently if they also incur a mode switch, and since they are not filtered by kallsyms, they may cause the build to fail. So filter symbols whose names end in '_from_arm' or '_from_thumb' as well. 
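The filter consuming this table is a plain suffix match; the sketch below shows the idea as a standalone function (simplified and renamed, not the exact scripts/kallsyms.c code, which performs the check inside symbol_valid()):

```c
#include <string.h>

/* Suffixes of linker-generated veneers that kallsyms should ignore. */
static const char *special_suffixes[] = {
	"_veneer",	/* ARM long-branch veneers */
	"_from_arm",	/* ARM<->Thumb mode-switching veneers */
	"_from_thumb",
	NULL
};

/* Return 0 if the symbol name ends in one of the listed suffixes. */
static int symbol_wanted(const char *sym_name)
{
	size_t len = strlen(sym_name);
	int i;

	for (i = 0; special_suffixes[i]; i++) {
		size_t slen = strlen(special_suffixes[i]);

		if (len >= slen &&
		    !strcmp(sym_name + len - slen, special_suffixes[i]))
			return 0;
	}
	return 1;
}
```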
Tested-by: Arnd Bergmann Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 638b143e..e287ce6 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -223,6 +223,8 @@ static int symbol_valid(struct sym_entry *s) static char *special_suffixes[] = { "_veneer", /* arm */ + "_from_arm", /* arm */ + "_from_thumb", /* arm */ NULL }; int i; -- cgit v0.10.2 From d4ffe418195b2d0581dfcda05f366e3b0f23d2a9 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 29 Mar 2016 08:51:48 +0100 Subject: ARM: 8552/1: kallsyms: remove special lower address limit for CONFIG_ARM Now that we no longer emit .stubs symbols into a section VMA loaded at absolute address 0x1000, we can drop the ARM-specific override that sets a lower limit based on CONFIG_PAGE_OFFSET, below which symbols are filtered from the kallsyms output. Acked-by: Nicolas Pitre Acked-by: Chris Brandt Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 49d61ad..f0f6d9d 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -82,10 +82,6 @@ kallsyms() kallsymopt="${kallsymopt} --all-symbols" fi - if [ -n "${CONFIG_ARM}" ] && [ -z "${CONFIG_XIP_KERNEL}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then - kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" - fi - if [ -n "${CONFIG_KALLSYMS_ABSOLUTE_PERCPU}" ]; then kallsymopt="${kallsymopt} --absolute-percpu" fi -- cgit v0.10.2 From 2d9586399932dff4746dc25d6498123959d69762 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 29 Mar 2016 08:54:47 +0100 Subject: ARM: 8553/1: kallsyms: remove --page-offset command line option The --page-offset command line option was only used for ARM, to filter symbol addresses below CONFIG_PAGE_OFFSET. This is no longer needed, so remove the functionality altogether. Acked-by: Nicolas Pitre Acked-by: Chris Brandt Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index e287ce6..1f22a18 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -63,7 +63,6 @@ static unsigned int table_size, table_cnt; static int all_symbols = 0; static int absolute_percpu = 0; static char symbol_prefix_char = '\0'; -static unsigned long long kernel_start_addr = 0; static int base_relative = 0; int token_profit[0x10000]; @@ -230,10 +229,6 @@ static int symbol_valid(struct sym_entry *s) int i; char *sym_name = (char *)s->sym + 1; - - if (s->addr < kernel_start_addr) - return 0; - /* skip prefix char */ if (symbol_prefix_char && *sym_name == symbol_prefix_char) sym_name++; @@ -767,9 +762,6 @@ int main(int argc, char **argv) if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) p++; symbol_prefix_char = *p; - } else if (strncmp(argv[i], "--page-offset=", 14) == 0) { - const char *p = &argv[i][14]; - kernel_start_addr = strtoull(p, NULL, 16); } else if (strcmp(argv[i], "--base-relative") == 0) base_relative = 1; else -- cgit v0.10.2 From 6e8ac724bf45d116195d57fbe3a949f570c35635 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 19 Apr 2016 06:05:09 +0100 Subject: ARM: 8562/1: suppress "include/generated/mach-types.h is up to date." For incremental build, "include/generated/mach-types.h is up to date" is every time displayed like follows: $ make ARCH=arm CHK include/config/kernel.release CHK include/generated/uapi/linux/version.h CHK include/generated/utsrelease.h make[1]: `include/generated/mach-types.h' is up to date. 
CHK include/generated/bounds.h CHK include/generated/timeconst.h CHK include/generated/asm-offsets.h This commit avoids such a clumsy log and introduces Kbuild standard log style: GEN include/generated/mach-types.h Signed-off-by: Masahiro Yamada Signed-off-by: Russell King diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 32d05c8..6e4cd18 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -4,7 +4,10 @@ # Copyright (C) 2001 Russell King # -include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types - @$(kecho) ' Generating $@' - @mkdir -p $(dir $@) - $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } +quiet_cmd_gen_mach = GEN $@ + cmd_gen_mach = mkdir -p $(dir $@) && \ + $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \ + { rm -f $@; /bin/false; } + +include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE + $(call if_changed,gen_mach) -- cgit v0.10.2 From bc5ce155d9d769f86a63a1301fb295ea3fcf22b2 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 5 Apr 2016 03:08:26 +0100 Subject: ARM: 8557/1: specify install, zinstall, and uinstall as PHONY targets Obviously, these are PHONY targets. Signed-off-by: Masahiro Yamada Signed-off-by: Russell King diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 48fab15..446705a 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -88,7 +88,7 @@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE $(call if_changed,objcopy) @$(kecho) ' Kernel: $@ is ready' -PHONY += initrd +PHONY += initrd install zinstall uinstall initrd: @test "$(INITRD_PHYS)" != "" || \ (echo This machine does not support INITRD; exit -1) -- cgit v0.10.2 From 61603016e2122bf95328321b2f1a64277202b6e3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 14 Mar 2016 19:34:37 +0000 Subject: ARM: kexec: fix crashkernel= handling When the kernel crashkernel parameter is specified with just a size, we are supposed to allocate a region from RAM to store the crashkernel. However, ARM merely reserves physical address zero with no checking that there is even RAM there. Fix this by lifting similar code from x86, importing it to ARM with the ARM specific parameters added. In the absence of any platform specific information, we allocate the crashkernel region from the first 512MB of physical memory. Update the kdump documentation to reflect this change. Signed-off-by: Russell King Reviewed-by: Pratyush Anand diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt index bc4bd5a..88ff63d 100644 --- a/Documentation/kdump/kdump.txt +++ b/Documentation/kdump/kdump.txt @@ -263,12 +263,6 @@ The syntax is: crashkernel=:[,:,...][@offset] range=start-[end] -Please note, on arm, the offset is required. - crashkernel=:[,:,...]@offset - range=start-[end] - - 'start' is inclusive and 'end' is exclusive. - For example: crashkernel=512M-2G:64M,2G-:128M @@ -307,10 +301,9 @@ Boot into System Kernel on the memory consumption of the kdump system. In general this is not dependent on the memory size of the production system. - On arm, use "crashkernel=Y@X". Note that the start address of the kernel - will be aligned to 128MiB (0x08000000), so if the start address is not then - any space below the alignment point may be overwritten by the dump-capture kernel, - which means it is possible that the vmcore is not that precise as expected. + On arm, the use of "crashkernel=Y@X" is no longer necessary; the + kernel will automatically locate the crash kernel image within the + first 512MB of RAM if X is not given. 
Load the Dump-capture Kernel diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 139791e..77b54c4 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -938,6 +938,13 @@ static int __init init_machine_late(void) late_initcall(init_machine_late); #ifdef CONFIG_KEXEC +/* + * The crash region must be aligned to 128MB to avoid + * zImage relocating below the reserved region. + */ +#define CRASH_ALIGN (128 << 20) +#define CRASH_ADDR_MAX (PHYS_OFFSET + (512 << 20)) + static inline unsigned long long get_total_mem(void) { unsigned long total; @@ -965,6 +972,28 @@ static void __init reserve_crashkernel(void) if (ret) return; + if (crash_base <= 0) { + unsigned long long crash_max = CRASH_ADDR_MAX; + if (crash_max > (u32)~0) + crash_max = (u32)~0; + crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max, + crash_size, CRASH_ALIGN); + if (!crash_base) { + pr_err("crashkernel reservation failed - No suitable area found.\n"); + return; + } + } else { + unsigned long long start; + + start = memblock_find_in_range(crash_base, + crash_base + crash_size, + crash_size, SECTION_SIZE); + if (start != crash_base) { + pr_err("crashkernel reservation failed - memory is in use.\n"); + return; + } + } + ret = memblock_reserve(crash_base, crash_size); if (ret < 0) { pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n", -- cgit v0.10.2 From 981b6714dbd26609212536b9fed43e49db1459cf Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 15 Mar 2016 14:55:03 +0000 Subject: ARM: provide improved virt_to_idmap() functionality For kexec, we need more functionality from the IDMAP system. We need to be able to convert physical addresses to their identity mappped versions as well as virtual addresses. Convert the existing arch_virt_to_idmap() to deal with physical addresses instead. Acked-by: Santosh Shilimkar Signed-off-by: Russell King diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 9427fd6..ca20833 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -288,19 +288,38 @@ static inline void *phys_to_virt(phys_addr_t x) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) -extern unsigned long (*arch_virt_to_idmap)(unsigned long x); +extern long long arch_phys_to_idmap_offset; /* - * These are for systems that have a hardware interconnect supported alias of - * physical memory for idmap purposes. Most cases should leave these + * These are for systems that have a hardware interconnect supported alias + * of physical memory for idmap purposes. Most cases should leave these * untouched. Note: this can only return addresses less than 4GiB. 
*/ +#define IDMAP_INVALID_ADDR ((u32)~0) + +static inline unsigned long phys_to_idmap(phys_addr_t addr) +{ + if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) { + addr += arch_phys_to_idmap_offset; + if (addr > (u32)~0) + addr = IDMAP_INVALID_ADDR; + } + return addr; +} + +static inline phys_addr_t idmap_to_phys(unsigned long idmap) +{ + phys_addr_t addr = idmap; + + if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) + addr -= arch_phys_to_idmap_offset; + + return addr; +} + static inline unsigned long __virt_to_idmap(unsigned long x) { - if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap) - return arch_virt_to_idmap(x); - else - return __virt_to_phys(x); + return phys_to_idmap(__virt_to_phys(x)); } #define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x)) diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index e6b9cb1..a33a296 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -63,11 +63,6 @@ static void __init keystone_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static unsigned long keystone_virt_to_idmap(unsigned long x) -{ - return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START; -} - static long long __init keystone_pv_fixup(void) { long long offset; @@ -91,7 +86,7 @@ static long long __init keystone_pv_fixup(void) offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START; /* Populate the arch idmap hook */ - arch_virt_to_idmap = keystone_virt_to_idmap; + arch_phys_to_idmap_offset = -offset; return offset; } diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index bd274a0..c1a48f8 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -15,7 +15,7 @@ * page tables. */ pgd_t *idmap_pgd; -unsigned long (*arch_virt_to_idmap)(unsigned long x); +long long arch_phys_to_idmap_offset; #ifdef CONFIG_ARM_LPAE static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, -- cgit v0.10.2 From d0506a2395eb07bef248de95016d9a8cf8a465ea Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Apr 2016 14:47:36 +0100 Subject: ARM: kexec: remove 512MB restriction on kexec crashdump The real limit is the top of the visible physical address space with the MMU turned off. Hence, we need to limit the crash kernel allocation running-view physical address of the top of the boot-view physical address space. Reviewed-by: Pratyush Anand Signed-off-by: Russell King diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 77b54c4..d9317ee 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -943,7 +943,6 @@ late_initcall(init_machine_late); * zImage relocating below the reserved region. */ #define CRASH_ALIGN (128 << 20) -#define CRASH_ADDR_MAX (PHYS_OFFSET + (512 << 20)) static inline unsigned long long get_total_mem(void) { @@ -973,9 +972,7 @@ static void __init reserve_crashkernel(void) return; if (crash_base <= 0) { - unsigned long long crash_max = CRASH_ADDR_MAX; - if (crash_max > (u32)~0) - crash_max = (u32)~0; + unsigned long long crash_max = idmap_to_phys((u32)~0); crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max, crash_size, CRASH_ALIGN); if (!crash_base) { -- cgit v0.10.2 From 07a7056ccce3ffdb65908bf502aeb2503714da46 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 15 Mar 2016 15:00:30 +0000 Subject: ARM: provide arm_has_idmap_alias() helper Provide a helper to indicate whether we need to perform special handling for boot identity mapping aliases or not. 
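To make the intended use concrete, here is a hypothetical caller (not part of this series; the function name and warning text are illustrative only) showing how the helper pairs with phys_to_idmap() and IDMAP_INVALID_ADDR introduced in the previous patch:

```c
/*
 * Hypothetical kexec-style caller: extra handling is only needed when
 * the boot-view (identity-mapped) physical addresses differ from the
 * running-view ones.
 */
static void prepare_boot_view(phys_addr_t phys)
{
	unsigned long idmap;

	if (!arm_has_idmap_alias())
		return;				/* boot view == running view */

	idmap = phys_to_idmap(phys);
	if (idmap == IDMAP_INVALID_ADDR)
		pr_warn("no 32-bit boot-view alias for %pa\n", &phys);
	/* ... otherwise pass 'idmap' to whatever runs with the MMU off */
}
```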
Signed-off-by: Russell King Reviewed-by: Pratyush Anand diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index ca20833..31c07a2 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -295,6 +295,11 @@ extern long long arch_phys_to_idmap_offset; * of physical memory for idmap purposes. Most cases should leave these * untouched. Note: this can only return addresses less than 4GiB. */ +static inline bool arm_has_idmap_alias(void) +{ + return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0; +} + #define IDMAP_INVALID_ADDR ((u32)~0) static inline unsigned long phys_to_idmap(phys_addr_t addr) -- cgit v0.10.2 From a41980f2a3eb33ed7a2636e83498b47e95ceb05b Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 21 Apr 2016 07:58:35 +0100 Subject: ARM: 8566/1: drivers: amba: properly handle devices with power domains To read pid/cid registers, the probed device need to be properly turned on. When it is inside a power domain, the bus code should ensure that the given power domain is enabled before trying to access device's registers. However in some cases power domain (or clocks) might not be yet available. Returning -EPROBE_DEFER is not a solution in such case, because callers don't handle this special error code. Instead such devices are added to the special list and their registration is retried from periodic worker until all resources are available. Signed-off-by: Marek Szyprowski Signed-off-by: Russell King diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index f009936..a5b5c87 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -336,16 +336,7 @@ static void amba_device_release(struct device *dev) kfree(d); } -/** - * amba_device_add - add a previously allocated AMBA device structure - * @dev: AMBA device allocated by amba_device_alloc - * @parent: resource parent for this devices resources - * - * Claim the resource, and read the device cell ID if not already - * initialized. Register the AMBA device with the Linux device - * manager. - */ -int amba_device_add(struct amba_device *dev, struct resource *parent) +static int amba_device_try_add(struct amba_device *dev, struct resource *parent) { u32 size; void __iomem *tmp; @@ -373,6 +364,12 @@ int amba_device_add(struct amba_device *dev, struct resource *parent) goto err_release; } + ret = dev_pm_domain_attach(&dev->dev, true); + if (ret == -EPROBE_DEFER) { + iounmap(tmp); + goto err_release; + } + ret = amba_get_enable_pclk(dev); if (ret == 0) { u32 pid, cid; @@ -398,6 +395,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent) } iounmap(tmp); + dev_pm_domain_detach(&dev->dev, true); if (ret) goto err_release; @@ -421,6 +419,88 @@ int amba_device_add(struct amba_device *dev, struct resource *parent) err_out: return ret; } + +/* + * Registration of AMBA device require reading its pid and cid registers. + * To do this, the device must be turned on (if it is a part of power domain) + * and have clocks enabled. However in some cases those resources might not be + * yet available. Returning EPROBE_DEFER is not a solution in such case, + * because callers don't handle this special error code. Instead such devices + * are added to the special list and their registration is retried from + * periodic worker, until all resources are available and registration succeeds. 
+ */ +struct deferred_device { + struct amba_device *dev; + struct resource *parent; + struct list_head node; +}; + +static LIST_HEAD(deferred_devices); +static DEFINE_MUTEX(deferred_devices_lock); + +static void amba_deferred_retry_func(struct work_struct *dummy); +static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func); + +#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000)) + +static void amba_deferred_retry_func(struct work_struct *dummy) +{ + struct deferred_device *ddev, *tmp; + + mutex_lock(&deferred_devices_lock); + + list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) { + int ret = amba_device_try_add(ddev->dev, ddev->parent); + + if (ret == -EPROBE_DEFER) + continue; + + list_del_init(&ddev->node); + kfree(ddev); + } + + if (!list_empty(&deferred_devices)) + schedule_delayed_work(&deferred_retry_work, + DEFERRED_DEVICE_TIMEOUT); + + mutex_unlock(&deferred_devices_lock); +} + +/** + * amba_device_add - add a previously allocated AMBA device structure + * @dev: AMBA device allocated by amba_device_alloc + * @parent: resource parent for this devices resources + * + * Claim the resource, and read the device cell ID if not already + * initialized. Register the AMBA device with the Linux device + * manager. + */ +int amba_device_add(struct amba_device *dev, struct resource *parent) +{ + int ret = amba_device_try_add(dev, parent); + + if (ret == -EPROBE_DEFER) { + struct deferred_device *ddev; + + ddev = kmalloc(sizeof(*ddev), GFP_KERNEL); + if (!ddev) + return -ENOMEM; + + ddev->dev = dev; + ddev->parent = parent; + ret = 0; + + mutex_lock(&deferred_devices_lock); + + if (list_empty(&deferred_devices)) + schedule_delayed_work(&deferred_retry_work, + DEFERRED_DEVICE_TIMEOUT); + list_add_tail(&ddev->node, &deferred_devices); + + mutex_unlock(&deferred_devices_lock); + } + return ret; +} EXPORT_SYMBOL_GPL(amba_device_add); static struct amba_device * -- cgit v0.10.2 From 953efb2b86cfdad340965222389122cc81cba76e Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Thu, 28 Apr 2016 09:52:44 +0100 Subject: ARM: 8568/1: reboot: remove duplicated local_irq_disable() Once entering machine_halt() and machine_restart(), local_irq_disable() is called, and local irq is kept disabled, so the local_irq_disable() at the end of these two functions are not necessary, remove it. Signed-off-by: Jisheng Zhang Signed-off-by: Russell King diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 71a2ff9..3fa867a 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -104,8 +104,6 @@ void machine_halt(void) { local_irq_disable(); smp_send_stop(); - - local_irq_disable(); while (1); } @@ -150,6 +148,5 @@ void machine_restart(char *cmd) /* Whoops - the platform was unable to reboot. Tell the user! */ printk("Reboot failed -- System halted\n"); - local_irq_disable(); while (1); } -- cgit v0.10.2 From 204932dfc87c236fc4fea8e6e4f03123853ea633 Mon Sep 17 00:00:00 2001 From: Brad Mouring Date: Thu, 28 Apr 2016 17:00:52 +0100 Subject: ARM: 8569/1: pl2x0: Add OF control of cache power management Add ability to override power management bits of 310 controllers (dynamic clock gating and standby mode) through OF entries. As the saved register is only applied when working on a supported controller, it is safe to save the settings. In order to maintain existing behavior, if the settings are not found in the DT, the corresponding feature will be enabled. 
Signed-off-by: Brad Mouring Signed-off-by: Russell King diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 9f9d542..c61996c 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -647,11 +647,6 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock) aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP); } - /* r3p0 or later has power control register */ - if (rev >= L310_CACHE_ID_RTL_R3P0) - l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN | - L310_STNDBY_MODE_EN; - /* * Always enable non-secure access to the lockdown registers - * we write to them as part of the L2C enable sequence so they @@ -1141,6 +1136,7 @@ static void __init l2c310_of_parse(const struct device_node *np, u32 filter[2] = { 0, 0 }; u32 assoc; u32 prefetch; + u32 power; u32 val; int ret; @@ -1271,6 +1267,26 @@ static void __init l2c310_of_parse(const struct device_node *np, } l2x0_saved_regs.prefetch_ctrl = prefetch; + + power = l2x0_saved_regs.pwr_ctrl | + L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN; + + ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val); + if (!ret) { + if (!val) + power &= ~L310_DYNAMIC_CLK_GATING_EN; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n"); + } + ret = of_property_read_u32(np, "arm,standby-mode", &val); + if (!ret) { + if (!val) + power &= ~L310_STNDBY_MODE_EN; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF standby-mode property value is missing or invalid\n"); + } + + l2x0_saved_regs.pwr_ctrl = power; } static const struct l2c_init_data of_l2c310_data __initconst = { -- cgit v0.10.2 From 7274a69cd86f61602a49a3d0b64d29b465f46a15 Mon Sep 17 00:00:00 2001 From: Brad Mouring Date: Wed, 4 May 2016 19:35:25 +0100 Subject: ARM: 8570/2: Documentation: devicetree: Add PL310 PM bindings Document the DT bindings for controlling ARM PL310 Power Control settings. Discussion on the binding wording: http://archive.arm.linux.org.uk/lurker/message/20160427.143444.5141d302.en.html Signed-off-by: Brad Mouring Acked-by: Rob Herring Signed-off-by: Russell King diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.txt b/Documentation/devicetree/bindings/arm/l2c2x0.txt index fe0398c..c453ab5 100644 --- a/Documentation/devicetree/bindings/arm/l2c2x0.txt +++ b/Documentation/devicetree/bindings/arm/l2c2x0.txt @@ -84,6 +84,12 @@ Optional properties: - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable), <1> (forcibly enable), property absent (retain settings set by firmware) +- arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly + disable), <1> (forcibly enable), property absent (OS specific behavior, + preferrably retain firmware settings) +- arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable), + <1> (forcibly enable), property absent (OS specific behavior, + preferrably retain firmware settings) Example: -- cgit v0.10.2 From 6427a840ff6aeaac36c59872b0b4b2040ed26c9b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 26 Apr 2016 09:11:13 +0100 Subject: ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs This outer cache allows to control active ways independently for each CPU, but currently nothing is done for secondary CPUs. In other words, all the ways are locked for secondary CPUs by default. This commit fixes it to fully bring out the performance of this outer cache. There would be two possible ways to achieve this: [1] Each CPU initializes active ways for itself. 
This can be done via the SSCLPDAWCR register. This is a banked register, so each CPU sees a different instance of the register for its own. [2] The master CPU initializes active ways for all the CPUs. This is available via SSCDAWCARMR(N) registers, where all instances of SSCLPDAWCR are mirrored. They are mapped at the address SSCDAWCARMR + 4 * N, where N is the CPU number. The outer cache frame work does not support a per-CPU init callback. So this commit adopts [2]; the master CPU iterates over possible CPUs setting up SSCDAWCARMR(N) registers. Signed-off-by: Masahiro Yamada Signed-off-by: Russell King diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c index a6fa7b7..c8e2f49 100644 --- a/arch/arm/mm/cache-uniphier.c +++ b/arch/arm/mm/cache-uniphier.c @@ -96,6 +96,7 @@ struct uniphier_cache_data { void __iomem *ctrl_base; void __iomem *rev_base; void __iomem *op_base; + void __iomem *way_ctrl_base; u32 way_present_mask; u32 way_locked_mask; u32 nsets; @@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways( struct uniphier_cache_data *data, u32 way_mask) { + unsigned int cpu; + data->way_locked_mask = way_mask & data->way_present_mask; - writel_relaxed(~data->way_locked_mask & data->way_present_mask, - data->ctrl_base + UNIPHIER_SSCLPDAWCR); + for_each_possible_cpu(cpu) + writel_relaxed(~data->way_locked_mask & data->way_present_mask, + data->way_ctrl_base + 4 * cpu); } static void uniphier_cache_maint_range(unsigned long start, unsigned long end, @@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np, goto err; } + data->way_ctrl_base = data->ctrl_base + 0xc00; + if (*cache_level == 2) { u32 revision = readl(data->rev_base + UNIPHIER_SSCID); /* @@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np, */ if (revision <= 0x16) data->range_op_max_size = (u32)1 << 22; + + /* + * Unfortunatly, the offset address of active way control base + * varies from SoC to SoC. + */ + switch (revision) { + case 0x11: /* sLD3 */ + data->way_ctrl_base = data->ctrl_base + 0x870; + break; + case 0x12: /* LD4 */ + case 0x16: /* sld8 */ + data->way_ctrl_base = data->ctrl_base + 0x840; + break; + default: + break; + } } data->range_op_max_size -= data->line_size; -- cgit v0.10.2