From bbeb9209515989ff47802d4e5d5702178c8e42c4 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 25 Aug 2015 13:52:09 +0100 Subject: ARM: 8422/1: enable imprecise aborts during early kernel startup This patch adds imprecise abort enable/disable macros and uses them to enable imprecise aborts early when starting the kernel. This helps in tracking down the real cause for such imprecise abort, as they are handled as soon as they occur. Until now those aborts would only be enabled when entering the userspace and as a consequence crash the first userspace process if any abort had been raised during kernel startup. Signed-off-by: Fabrice Gasnier Signed-off-by: Lucas Stach Signed-off-by: Russell King diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h index 4390814..e6b70d9 100644 --- a/arch/arm/include/asm/irqflags.h +++ b/arch/arm/include/asm/irqflags.h @@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void) #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") + +#ifndef CONFIG_CPU_V7M +#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc") +#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc") +#else +#define local_abt_enable() do { } while (0) +#define local_abt_disable() do { } while (0) +#endif #else /* @@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void) : "memory", "cc"); \ }) +#define local_abt_enable() do { } while (0) +#define local_abt_disable() do { } while (0) #endif /* diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 48185a7..36045e8 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void) local_irq_enable(); local_fiq_enable(); + local_abt_enable(); /* * OK, it's off to the idle thread for us diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 7cd1514..f65a6f3 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1363,6 +1363,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) */ local_flush_tlb_all(); flush_cache_all(); + + /* Enable asynchronous aborts */ + local_abt_enable(); } static void __init kmap_init(void) -- cgit v0.10.2 From 4caa9dda388f34f957a9eb52b9f5ef1a8c975c7b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 26 Aug 2015 07:49:11 +0100 Subject: ARM: 8424/1: add const qualifier to the argument of smp_set_ops() This function just copies '*ops' to 'smp_ops', so the given structure '*ops' is not modified at all. 
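As an aside, the practical effect of the constified prototype is that platform code can keep its operations table in read-only data when it calls smp_set_ops() directly. A minimal caller-side sketch (illustrative only; the foo_* names are made up and not part of this series):

	static const struct smp_operations foo_smp_ops = {
		.smp_init_cpus		= foo_smp_init_cpus,
		.smp_boot_secondary	= foo_boot_secondary,
	};

	static void __init foo_init_early(void)
	{
		smp_set_ops(&foo_smp_ops);
	}

Since smp_set_ops() only copies *ops into the static smp_ops variable, nothing ever needs to write through the pointer, which is exactly what the new const argument documents.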
Signed-off-by: Masahiro Yamada Acked-by: Stephen Boyd Signed-off-by: Russell King diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index ef35665..2f8bd44 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -122,6 +122,6 @@ struct of_cpu_method { /* * set platform specific SMP operations */ -extern void smp_set_ops(struct smp_operations *); +extern void smp_set_ops(const struct smp_operations *); #endif /* ifndef __ASM_ARM_SMP_H */ diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 36045e8..7c467c5 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running); static struct smp_operations smp_ops; -void __init smp_set_ops(struct smp_operations *ops) +void __init smp_set_ops(const struct smp_operations *ops) { if (ops) smp_ops = *ops; -- cgit v0.10.2 From f460b6abdeeafd30c3ee737c843be17b1ceb38e5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 26 Aug 2015 07:49:12 +0100 Subject: ARM: 8423/1: add const qualifier to smp_operations member in structures The core framework does not modify smp_operations structures. To clarify it, this commit adds 'const' qualifier to the 'ops' member of struct of_cpu_method and the 'smp' member of struct machine_desc. This change allows each SoC code to add 'const' qualifier to its smp_operation structure. Signed-off-by: Masahiro Yamada Signed-off-by: Russell King diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index cb3a407..5c1ad11 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -47,7 +47,7 @@ struct machine_desc { unsigned l2c_aux_val; /* L2 cache aux value */ unsigned l2c_aux_mask; /* L2 cache aux mask */ void (*l2c_write_sec)(unsigned long, unsigned); - struct smp_operations *smp; /* SMP operations */ + const struct smp_operations *smp; /* SMP operations */ bool (*smp_init)(void); void (*fixup)(struct tag *, char **); void (*dt_fixup)(void); diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2f8bd44..3d6dc8b 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -112,7 +112,7 @@ struct smp_operations { struct of_cpu_method { const char *method; - struct smp_operations *ops; + const struct smp_operations *ops; }; #define CPU_METHOD_OF_DECLARE(name, _method, _ops) \ -- cgit v0.10.2 From 803e3dbcb4cf80c898faccf01875f6ff6e5e76fd Mon Sep 17 00:00:00 2001 From: Sergey Dyasly Date: Wed, 9 Sep 2015 16:27:18 +0100 Subject: ARM: 8430/1: use default ioremap alignment for SMP or LPAE 16MB alignment for ioremap mappings was added by commit a069c896d0d6 ("[ARM] 3705/1: add supersection support to ioremap()") in order to support supersection mappings. But __arm_ioremap_pfn_caller uses section and supersection mappings only in !SMP && !LPAE case. There is no need for such big alignment if either SMP or LPAE is enabled. After this change, ioremap will use default maximum alignment of 128 pages. 
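For context on where IOREMAP_MAX_ORDER bites: the vmalloc allocator picks the alignment of a VM_IOREMAP area from the mapping size and clamps it at IOREMAP_MAX_ORDER. A simplified sketch of that calculation (paraphrased from mm/vmalloc.c of that era, so treat the details as approximate):

	/* Roughly how the alignment of an ioremap area is chosen. */
	static unsigned long ioremap_align(unsigned long size)
	{
		int order = fls_long(size);	/* order covering the requested size */

		if (order < PAGE_SHIFT)
			order = PAGE_SHIFT;
		if (order > IOREMAP_MAX_ORDER)	/* 24 (16MB) now only for !SMP && !LPAE */
			order = IOREMAP_MAX_ORDER;
		return 1UL << order;
	}

With the ARM-specific definition removed for SMP/LPAE builds, the generic default of 7 + PAGE_SHIFT applies, i.e. the 128-page maximum alignment mentioned above.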
Link: https://lkml.kernel.org/g/1419328813-2211-1-git-send-email-d.safonov@partner.samsung.com Cc: Guan Xuetao Cc: Nicolas Pitre Cc: James Bottomley Cc: Will Deacon Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Andrew Morton Cc: Dmitry Safonov Signed-off-by: Sergey Dyasly Signed-off-by: Russell King diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 98d58bb..c79b57b 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -76,10 +76,12 @@ */ #define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) +#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) /* * Allow 16MB-aligned ioremap pages */ #define IOREMAP_MAX_ORDER 24 +#endif #else /* CONFIG_MMU */ -- cgit v0.10.2 From 6ff0966052c46efb53980b8a1add2e7b49c9f560 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sun, 13 Sep 2015 03:25:26 +0100 Subject: ARM: 8432/1: move VMALLOC_END from 0xff000000 to 0xff800000 There is a 12MB unused region in our memory map between the vmalloc and fixmap areas. This became unused with commit e9da6e9905e6, confirmed with commit 64d3b6a3f480. We also have an 8MB guard area before the vmalloc area. With the default 240MB vmalloc area size and the current VMALLOC_END definition, that means the end of low memory ends up at 0xef800000 which is unfortunate for 768MB machines where 8MB of RAM is lost to highmem. Let's move VMALLOC_END to 0xff800000 so the guard area won't chop the top of the 768MB low memory area while keeping the default vmalloc area size unchanged and still preserving a gap between the vmalloc and fixmap areas. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt index 4178ebd..546a390 100644 --- a/Documentation/arm/memory.txt +++ b/Documentation/arm/memory.txt @@ -54,7 +54,7 @@ VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space. located here through iotable_init(). VMALLOC_START is based upon the value of the high_memory variable, and VMALLOC_END - is equal to 0xff000000. + is equal to 0xff800000. PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region. This maps the platforms RAM, and typically diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index f403541..348caab 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -43,7 +43,7 @@ */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_END 0xff000000UL +#define VMALLOC_END 0xff800000UL #define LIBRARY_TEXT_START 0x0c000000 -- cgit v0.10.2 From 868e87ccda2461cafd4a0d39f1486eb8f4a9a6f9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 28 Sep 2015 10:31:50 +0100 Subject: ARM: make RiscPC depend on MMU RiscPC fails to build if MMU is disabled: arch/arm/mach-rpc/ecard.c: In function 'ecard_init_pgtables': arch/arm/mach-rpc/ecard.c:229:2: error: implicit declaration of function 'pgd_offset' [-Werror=implicit-function-declaration] Arrange for RiscPC to depend on MMU. 
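Referring back to the VMALLOC_END move above (ARM 8432/1), a quick sanity check of the arithmetic, assuming the default 240MB vmalloc area, the 8MB VMALLOC_OFFSET guard and the default 0xc0000000 PAGE_OFFSET:

	old: 0xff000000 - 240MB - 8MB = 0xef800000  (lowmem stops 8MB short of 768MB)
	new: 0xff800000 - 240MB - 8MB = 0xf0000000  (= 0xc0000000 + 768MB)

so a 768MB machine can now map all of its RAM as low memory, which is the case the commit message is after.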
Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 72ad724..639411f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY config ARCH_RPC bool "RiscPC" + depends on MMU select ARCH_ACORN select ARCH_MAY_HAVE_PC_FDC select ARCH_SPARSEMEM_ENABLE -- cgit v0.10.2 From 63c27ae7981e5c28abfe97ca5f6ef92a5070ce7a Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Fri, 2 Oct 2015 20:33:36 +0100 Subject: ARM: 8434/2: Revert "7655/1: smp_twd: make twd_local_timer_of_register() no-op for nosmp" This reverts commit 904464b91eca8c665acea033489225af02eeb75a. The problem pointed out by commit 904464b91eca ("ARM: 7655/1: smp_twd: make twd_local_timer_of_register() no-op for nosmp") doesn't exist anymore. We can safely boot with nosmp and the warning won't show up. The other side benefit of this patch is that TWD has a chance to probe on single-core A9 systems such as AM437x which sport TWD. While at that, also drop SMP dependency from TWD's Kconfig entry. Cc: Shawn Guo Cc: Dirk Behme Acked-by: Tony Lindgren Signed-off-by: Felipe Balbi Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 72ad724..f7bb1b7 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1410,7 +1410,6 @@ config HAVE_ARM_ARCH_TIMER config HAVE_ARM_TWD bool - depends on SMP select CLKSRC_OF if OF help This options enables support for the ARM timer and watchdog unit diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index e9035cd..662ed88 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -23,7 +23,6 @@ #include #include -#include #include /* set up by the platform code */ @@ -392,9 +391,6 @@ static void __init twd_local_timer_of_register(struct device_node *np) { int err; - if (!is_smp() || !setup_max_cpus) - return; - twd_ppi = irq_of_parse_and_map(np, 0); if (!twd_ppi) { err = -EINVAL; -- cgit v0.10.2 From b97b272e72ae07a04a664685c876439215bdeb74 Mon Sep 17 00:00:00 2001 From: Maninder Singh Date: Mon, 14 Sep 2015 15:59:26 +0100 Subject: ARM: 8436/1: hw_breakpoint: remove unnecessary header Header is not needed for arm/hw_breakpoint.c, so remove the pointless #include. Signed-off-by: Maninder Singh Reviewed-by: Vaneet Narang Signed-off-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index dc7d0a9..6284779 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -35,7 +35,6 @@ #include #include #include -#include #include /* Breakpoint currently in use for each BRP. */ -- cgit v0.10.2 From 6f56a68d0bed16c13e9ec958cd28acbc2991d495 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 15 Sep 2015 00:11:30 +0100 Subject: ARM: 8438/1: Add unwinding to __clear_user_std() Add unwinding annotations so that unwinding from this function works properly. 
Signed-off-by: Stephen Boyd Signed-off-by: Russell King diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index 970d6c0..e936352 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -9,6 +9,7 @@ */ #include #include +#include .text @@ -20,6 +21,8 @@ */ ENTRY(__clear_user_std) WEAK(arm_clear_user) +UNWIND(.fnstart) +UNWIND(.save {r1, lr}) stmfd sp!, {r1, lr} mov r2, #0 cmp r1, #4 @@ -44,6 +47,7 @@ WEAK(arm_clear_user) USER( strnebt r2, [r0]) mov r0, #0 ldmfd sp!, {r1, pc} +UNWIND(.fnend) ENDPROC(arm_clear_user) ENDPROC(__clear_user_std) -- cgit v0.10.2 From db695c0509d6ec9046ee5e4c520a19fa17d9fce2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 21 Sep 2015 19:34:28 +0100 Subject: ARM: remove user cmpxchg syscall Mark Brand reports that a NEEDS_SYSCALL_FOR_CMPXCHG enabled kernel would open a security hole in the ghost syscall used to implement cmpxchg, as it fails to validate the user pointer. However, in order for this option to be enabled, you'd need to be building a pre-ARMv6 kernel with SMP support. There is only one system known which fits that, which is an early ARM SMP FPGA implementation based on the ARM926T. In any case, the Kconfig does not allow SMP to be enabled for pre-ARMv6 systems. Moreover, even if NEEDS_SYSCALL_FOR_CMPXCHG were to be enabled, the kernel would not build as __ARM_NR_cmpxchg64 is not defined. The simple answer is to remove the buggy code. Reported-by: Mark Brand Signed-off-by: Russell King diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 32640c4..8f63daf 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -21,13 +21,6 @@ */ #define __NR_syscalls (388) -/* - * *NOTE*: This is a ghost syscall private to the kernel. Only the - * __kuser_cmpxchg code in entry-armv.S should be aware of its - * existence. Don't ever use this from user code. - */ -#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) - #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 3e1c26e..3ce377f 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -427,8 +427,7 @@ ENDPROC(__fiq_abt) .endm .macro kuser_cmpxchg_check -#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ - !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) +#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) #ifndef CONFIG_MMU #warning "NPTL on non MMU needs fixing" #else @@ -859,20 +858,7 @@ __kuser_helper_start: __kuser_cmpxchg64: @ 0xffff0f60 -#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) - - /* - * Poor you. No fast solution possible... - * The kernel itself must perform the operation. - * A special ghost syscall is used for that (see traps.c). - */ - stmfd sp!, {r7, lr} - ldr r7, 1f @ it's 20 bits - swi __ARM_NR_cmpxchg64 - ldmfd sp!, {r7, pc} -1: .word __ARM_NR_cmpxchg64 - -#elif defined(CONFIG_CPU_32v6K) +#if defined(CONFIG_CPU_32v6K) stmfd sp!, {r4, r5, r6, r7} ldrd r4, r5, [r0] @ load old val @@ -948,20 +934,7 @@ __kuser_memory_barrier: @ 0xffff0fa0 __kuser_cmpxchg: @ 0xffff0fc0 -#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) - - /* - * Poor you. No fast solution possible... - * The kernel itself must perform the operation. - * A special ghost syscall is used for that (see traps.c). 
- */ - stmfd sp!, {r7, lr} - ldr r7, 1f @ it's 20 bits - swi __ARM_NR_cmpxchg - ldmfd sp!, {r7, pc} -1: .word __ARM_NR_cmpxchg - -#elif __LINUX_ARM_ARCH__ < 6 +#if __LINUX_ARM_ARCH__ < 6 #ifdef CONFIG_MMU diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 969f9d9..bc69838 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) set_tls(regs->ARM_r0); return 0; -#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG - /* - * Atomically store r1 in *r2 if *r2 is equal to r0 for user space. - * Return zero in r0 if *MEM was changed or non-zero if no exchange - * happened. Also set the user C flag accordingly. - * If access permissions have to be fixed up then non-zero is - * returned and the operation has to be re-attempted. - * - * *NOTE*: This is a ghost syscall private to the kernel. Only the - * __kuser_cmpxchg code in entry-armv.S should be aware of its - * existence. Don't ever use this from user code. - */ - case NR(cmpxchg): - for (;;) { - extern void do_DataAbort(unsigned long addr, unsigned int fsr, - struct pt_regs *regs); - unsigned long val; - unsigned long addr = regs->ARM_r2; - struct mm_struct *mm = current->mm; - pgd_t *pgd; pmd_t *pmd; pte_t *pte; - spinlock_t *ptl; - - regs->ARM_cpsr &= ~PSR_C_BIT; - down_read(&mm->mmap_sem); - pgd = pgd_offset(mm, addr); - if (!pgd_present(*pgd)) - goto bad_access; - pmd = pmd_offset(pgd, addr); - if (!pmd_present(*pmd)) - goto bad_access; - pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) { - pte_unmap_unlock(pte, ptl); - goto bad_access; - } - val = *(unsigned long *)addr; - val -= regs->ARM_r0; - if (val == 0) { - *(unsigned long *)addr = regs->ARM_r1; - regs->ARM_cpsr |= PSR_C_BIT; - } - pte_unmap_unlock(pte, ptl); - up_read(&mm->mmap_sem); - return val; - - bad_access: - up_read(&mm->mmap_sem); - /* simulate a write access fault */ - do_DataAbort(addr, 15 + (1 << 11), regs); - } -#endif - default: /* Calls 9f00xx..9f07ff are defined to return -ENOSYS if not implemented, rather than raising SIGILL. This diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index df7537f..c219413 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -419,28 +419,24 @@ config CPU_THUMBONLY config CPU_32v3 bool select CPU_USE_DOMAINS if MMU - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select NEED_KUSER_HELPERS select TLS_REG_EMUL if SMP || !MMU config CPU_32v4 bool select CPU_USE_DOMAINS if MMU - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select NEED_KUSER_HELPERS select TLS_REG_EMUL if SMP || !MMU config CPU_32v4T bool select CPU_USE_DOMAINS if MMU - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select NEED_KUSER_HELPERS select TLS_REG_EMUL if SMP || !MMU config CPU_32v5 bool select CPU_USE_DOMAINS if MMU - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select NEED_KUSER_HELPERS select TLS_REG_EMUL if SMP || !MMU @@ -805,14 +801,6 @@ config TLS_REG_EMUL a few prototypes like that in existence) and therefore access to that required register must be emulated. -config NEEDS_SYSCALL_FOR_CMPXCHG - bool - select NEED_KUSER_HELPERS - help - SMP on a pre-ARMv6 processor? Well OK then. - Forget about fast user space cmpxchg support. - It is just not possible. 
config NEED_KUSER_HELPERS bool -- cgit v0.10.2 From 371f0f085f629fc0f66695f572373ca4445a67ad Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Fri, 28 Aug 2015 09:41:39 +0100 Subject: ARM: 8426/1: dma-mapping: add missing range check in dma_mmap() The dma_mmap() function in the IOMMU-based dma-mapping implementation lacked a check for a valid range of the mmap parameters (offset and buffer size), which might have caused access beyond the allocated buffer. This patch fixes this issue. Signed-off-by: Marek Szyprowski Cc: stable@vger.kernel.org # v3.6+ Signed-off-by: Russell King diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1a7815e..c086586 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1407,12 +1407,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, unsigned long uaddr = vma->vm_start; unsigned long usize = vma->vm_end - vma->vm_start; struct page **pages = __iommu_get_pages(cpu_addr, attrs); + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long off = vma->vm_pgoff; vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); if (!pages) return -ENXIO; + if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off) + return -ENXIO; + do { int ret = vm_insert_page(vma, uaddr, *pages++); if (ret) { -- cgit v0.10.2 From 7e31210349e9e03a9a4dff31ab5f2bc83e8e84f5 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Fri, 28 Aug 2015 09:42:09 +0100 Subject: ARM: 8427/1: dma-mapping: add support for offset parameter in dma_mmap() The IOMMU-based dma_mmap() implementation lacked proper support for the offset parameter used in the mmap call (it always assumed that the mapping starts from offset zero). This patch adds support for the offset parameter to the IOMMU-based implementation. Signed-off-by: Marek Szyprowski Cc: stable@vger.kernel.org # v3.6+ Signed-off-by: Russell King diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c086586..ad4eb2d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1418,6 +1418,8 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off) return -ENXIO; + pages += off; + do { int ret = vm_insert_page(vma, uaddr, *pages++); if (ret) { -- cgit v0.10.2 From 001bf455d20645190beb98ff4ee450dfea1b7eb2 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Wed, 2 Sep 2015 03:39:19 +0100 Subject: ARM: 8428/1: kgdb: Fix registers on sleeping tasks Dumping registers from other sleeping tasks in KGDB was totally failing for me. All registers were reported as 0 in many cases. The code was using task_pt_regs(task) to try to get other thread registers. This doesn't appear to be the right place to look. From my tests, I saw non-zero values in this structure when we were looking at a kernel thread that had a userspace task associated with it, but it contained the register values from the userspace task. So even in the cases where registers weren't reported as 0 we were still not showing the right thing. Instead of using task_pt_regs(task) let's use task_thread_info(task). This is the same place that is referred to when doing a dump of all sleeping task stacks (kdb_show_stack() -> show_stack() -> dump_backtrace() -> unwind_backtrace() -> thread_saved_sp()). 
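For reference, the switch_to() state this patch reads is the cpu_context member of struct thread_info; at the time it looked roughly like the following (quoted from arch/arm/include/asm/thread_info.h from memory, so treat it as a sketch rather than the authoritative definition):

	struct cpu_context_save {
		__u32	r4;
		__u32	r5;
		__u32	r6;
		__u32	r7;
		__u32	r8;
		__u32	r9;
		__u32	sl;
		__u32	fp;
		__u32	sp;
		__u32	pc;
		__u32	extra[2];		/* Xscale 'acc' register, etc. */
	};

Only these callee-saved values survive a context switch, which is why the patch below fills in just r4-r10, fp, sp and pc and leaves the remaining gdb registers zeroed.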
As further evidence that this is the right thing to do, you can find the following comment in "gdbstub.c" right before it calls sleeping_thread_to_gdb_regs(): Pull stuff saved during switch_to; nothing else is accessible (or even particularly relevant). This should be enough for a stack trace. ...and if you look at switch_to() it only saves r4-r11, sp and lr. Those are the same registers that I'm getting out of the task_thread_info(). With this change you can use "info thread" to see all tasks in the kernel and you can switch to other tasks and examine them in gdb. Signed-off-by: Doug Anderson Tested-by: Stephen Boyd Signed-off-by: Russell King diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index fd9eefc..9232cae 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) { - struct pt_regs *thread_regs; + struct thread_info *ti; int regno; /* Just making sure... */ @@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) gdb_regs[regno] = 0; /* Otherwise, we have only some registers from switch_to() */ - thread_regs = task_pt_regs(task); - gdb_regs[_R0] = thread_regs->ARM_r0; - gdb_regs[_R1] = thread_regs->ARM_r1; - gdb_regs[_R2] = thread_regs->ARM_r2; - gdb_regs[_R3] = thread_regs->ARM_r3; - gdb_regs[_R4] = thread_regs->ARM_r4; - gdb_regs[_R5] = thread_regs->ARM_r5; - gdb_regs[_R6] = thread_regs->ARM_r6; - gdb_regs[_R7] = thread_regs->ARM_r7; - gdb_regs[_R8] = thread_regs->ARM_r8; - gdb_regs[_R9] = thread_regs->ARM_r9; - gdb_regs[_R10] = thread_regs->ARM_r10; - gdb_regs[_FP] = thread_regs->ARM_fp; - gdb_regs[_IP] = thread_regs->ARM_ip; - gdb_regs[_SPT] = thread_regs->ARM_sp; - gdb_regs[_LR] = thread_regs->ARM_lr; - gdb_regs[_PC] = thread_regs->ARM_pc; - gdb_regs[_CPSR] = thread_regs->ARM_cpsr; + ti = task_thread_info(task); + gdb_regs[_R4] = ti->cpu_context.r4; + gdb_regs[_R5] = ti->cpu_context.r5; + gdb_regs[_R6] = ti->cpu_context.r6; + gdb_regs[_R7] = ti->cpu_context.r7; + gdb_regs[_R8] = ti->cpu_context.r8; + gdb_regs[_R9] = ti->cpu_context.r9; + gdb_regs[_R10] = ti->cpu_context.sl; + gdb_regs[_FP] = ti->cpu_context.fp; + gdb_regs[_SPT] = ti->cpu_context.sp; + gdb_regs[_PC] = ti->cpu_context.pc; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) -- cgit v0.10.2 From 0768330d46435f324a0b4860c889057524af17c2 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Tue, 22 Sep 2015 17:12:10 +0100 Subject: ARM: 8439/1: Fix backtrace generation when IPI is masked Currently on ARM, when a backtrace is triggered from an interrupt handler (e.g. a SysRq issued using UART or kbd), the main CPU will wedge for ten seconds with interrupts masked before issuing a backtrace for every CPU except itself. The new backtrace code introduced by commit 96f0e00378d4 ("ARM: add basic support for on-demand backtrace of other CPUs") does not work correctly when run from an interrupt handler because IPI_CPU_BACKTRACE is used to generate the backtrace on all CPUs but cannot preempt the current calling context. This can be fixed by detecting that the calling context cannot be preempted and issuing the backtrace directly in this case. Issuing directly leaves us without any pt_regs to pass to nmi_cpu_backtrace() so we also modify the generic code to call dump_stack() when its argument is NULL. 
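For context, raise_nmi() is the hook ARM hands to the generic backtrace helper; the pre-existing wiring in arch/arm/kernel/smp.c looks roughly like this (quoted from memory from commit 96f0e00378d4, so minor details may differ):

	void arch_trigger_all_cpu_backtrace(bool include_self)
	{
		nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
	}

A SysRq backtrace issued from an interrupt handler therefore reaches raise_nmi() with IRQs disabled on the issuing CPU, and that is the case the new check handles by calling nmi_cpu_backtrace(NULL) directly instead of waiting for an IPI that can never be delivered to the current context.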
Acked-by: Hillf Danton Acked-by: Thomas Gleixner Signed-off-by: Daniel Thompson Signed-off-by: Russell King diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7c467c5..b263613 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -749,6 +749,15 @@ core_initcall(register_cpufreq_notifier); static void raise_nmi(cpumask_t *mask) { + /* + * Generate the backtrace directly if we are running in a calling + * context that is not preemptible by the backtrace IPI. Note + * that nmi_cpu_backtrace() automatically removes the current cpu + * from mask. + */ + if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled()) + nmi_cpu_backtrace(NULL); + smp_cross_call(mask, IPI_CPU_BACKTRACE); } diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 88d3d32..6019c53 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end) printk("%.*s", (end - start) + 1, buf); } +/* + * When raise() is called it will be is passed a pointer to the + * backtrace_mask. Architectures that call nmi_cpu_backtrace() + * directly from their raise() functions may rely on the mask + * they are passed being updated as a side effect of this call. + */ void nmi_trigger_all_cpu_backtrace(bool include_self, void (*raise)(cpumask_t *mask)) { @@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) /* Replace printk to write into the NMI seq */ this_cpu_write(printk_func, nmi_vprintk); pr_warn("NMI backtrace for cpu %d\n", cpu); - show_regs(regs); + if (regs) + show_regs(regs); + else + dump_stack(); this_cpu_write(printk_func, printk_func_save); cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); -- cgit v0.10.2 From 63ce446c9b5787a94ed875bab20772e1a2b3092f Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sun, 13 Sep 2015 03:30:11 +0100 Subject: ARM: 8433/1: add a VMSPLIT_3G_OPT config option Mimicking the same config option on x86, this allows for 1GB systems to have their RAM entirely mapped as low memory. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index f7bb1b7..0e74068 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1469,6 +1469,8 @@ choice config VMSPLIT_3G bool "3G/1G user/kernel split" + config VMSPLIT_3G_OPT + bool "3G/1G user/kernel split (for full 1G low memory)" config VMSPLIT_2G bool "2G/2G user/kernel split" config VMSPLIT_1G @@ -1480,6 +1482,7 @@ config PAGE_OFFSET default PHYS_OFFSET if !MMU default 0x40000000 if VMSPLIT_1G default 0x80000000 if VMSPLIT_2G + default 0xB0000000 if VMSPLIT_3G_OPT default 0xC0000000 config NR_CPUS -- cgit v0.10.2 From 9a431bd5a26ca7db8ab251a75e006aa8b145718e Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Jun 2015 10:44:08 +0100 Subject: ARM: make highpte an expert option When highmem is enabled, there's little reason not to also enable highpte support as well. Rather than leaving this to chance, make it an expert option, and default it to be enabled if highmem is enabled. Add some help text to the option while we're here. Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0e74068..5f48580 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1697,8 +1697,9 @@ config HIGHMEM If unsure, say n. config HIGHPTE - bool "Allocate 2nd-level pagetables from highmem" + bool "Allocate 2nd-level pagetables from highmem" if EXPERT depends on HIGHMEM + default y help The VM uses one page of physical memory for each page table. 
For systems with a lot of processes, this can use a lot of -- cgit v0.10.2 From ff5138f3af4ed61ed42e00d7d771a0502460488f Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 6 Oct 2015 16:40:53 +0100 Subject: ARM: 8440/1: remove obsolete documentation The Victor target has been removed from mainline long ago. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/Documentation/arm/SA1100/Victor b/Documentation/arm/SA1100/Victor deleted file mode 100644 index 9cff415..0000000 --- a/Documentation/arm/SA1100/Victor +++ /dev/null @@ -1,16 +0,0 @@ -Victor is known as a "digital talking book player" manufactured by -VisuAide, Inc. to be used by blind people. - -For more information related to Victor, see: - - http://www.humanware.com/en-usa/products - -Of course Victor is using Linux as its main operating system. -The Victor implementation for Linux is maintained by Nicolas Pitre: - - nico@visuaide.com - nico@fluxnic.net - -For any comments, please feel free to contact me through the above -addresses. - -- cgit v0.10.2 From 194444c52edd857210b3895e83ce45c64b58e251 Mon Sep 17 00:00:00 2001 From: Marc Gonzalez Date: Fri, 9 Oct 2015 10:20:47 +0100 Subject: ARM: 8441/2: twd: Don't set CLOCK_EVT_FEAT_C3STOP unconditionally In 5388a6b266 ("ARM: SMP: Always enable clock event broadcast support") Russell noted that "the TWD local timers are unable to wake up the CPU when it is placed into a low power mode". However, some platforms do not stop the TWD block in low-power mode, and can thus use the TWD timer in one-shot mode, without setting up a broadcast device. Make the driver check for the "always-on" boolean property, and set the CLOCK_EVT_FEAT_C3STOP flag accordingly. Acked-by: Mark Rutland Signed-off-by: Marc Gonzalez Signed-off-by: Russell King diff --git a/Documentation/devicetree/bindings/arm/twd.txt b/Documentation/devicetree/bindings/arm/twd.txt index 75b8610..383ea19 100644 --- a/Documentation/devicetree/bindings/arm/twd.txt +++ b/Documentation/devicetree/bindings/arm/twd.txt @@ -19,6 +19,11 @@ interrupts. - reg : Specify the base address and the size of the TWD timer register window. +Optional + +- always-on : a boolean property. If present, the timer is powered through + an always-on power domain, therefore it never loses context. 
+ Example: twd-timer@2c000600 { diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 662ed88..4c1aa61 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -33,6 +33,7 @@ static unsigned long twd_timer_rate; static DEFINE_PER_CPU(bool, percpu_setup_called); static struct clock_event_device __percpu *twd_evt; +static int feat_c3stop; static int twd_ppi; static int twd_shutdown(struct clock_event_device *clk) @@ -294,7 +295,7 @@ static void twd_timer_setup(void) clk->name = "local_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_C3STOP; + feat_c3stop; clk->rating = 350; clk->set_state_shutdown = twd_shutdown; clk->set_state_periodic = twd_set_periodic; @@ -349,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np) goto out_irq; twd_get_clock(np); + if (!of_property_read_bool(np, "always-on")) + feat_c3stop = CLOCK_EVT_FEAT_C3STOP; /* * Immediately configure the timer on the boot CPU, unless we need -- cgit v0.10.2 From e1b8c05dccc7d3de8c49c92c41b2b5ac7d8275b2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 9 Oct 2015 16:18:09 +0100 Subject: ARM: clean up TWD after previous patch Rename feat_c3stop to twd_features to match the other variables in this file. Initialise it with the standard features that we always support, and arrange to set the CLOCK_EVT_FEAT_C3STOP when appropriate. Signed-off-by: Russell King diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 4c1aa61..1bfa7a7 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -33,7 +33,8 @@ static unsigned long twd_timer_rate; static DEFINE_PER_CPU(bool, percpu_setup_called); static struct clock_event_device __percpu *twd_evt; -static int feat_c3stop; +static unsigned int twd_features = + CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; static int twd_ppi; static int twd_shutdown(struct clock_event_device *clk) @@ -294,8 +295,7 @@ static void twd_timer_setup(void) writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); clk->name = "local_timer"; - clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | - feat_c3stop; + clk->features = twd_features; clk->rating = 350; clk->set_state_shutdown = twd_shutdown; clk->set_state_periodic = twd_set_periodic; @@ -351,7 +351,7 @@ static int __init twd_local_timer_common_register(struct device_node *np) twd_get_clock(np); if (!of_property_read_bool(np, "always-on")) - feat_c3stop = CLOCK_EVT_FEAT_C3STOP; + twd_features |= CLOCK_EVT_FEAT_C3STOP; /* * Immediately configure the timer on the boot CPU, unless we need -- cgit v0.10.2 From e8973a889e69cb86cac08bec2863c878c0d27af9 Mon Sep 17 00:00:00 2001 From: Sarbojit Ganguly Date: Fri, 9 Oct 2015 12:10:02 +0100 Subject: ARM: 8443/1: Adding support for atomic half word exchange Since support for half-word atomic exchange was not there and Qspinlock on ARM requires it, modified __xchg() to add support for that as well. ARMv6 and lower does not support ldrex{b,h} so, added a guard code to prevent build breaks. 
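A hypothetical caller-side sketch of what the new 16-bit case permits (illustrative only, not taken from the patch):

	/* Atomically exchange a 16-bit value; on ARMv6K and later this now
	 * compiles down to an ldrexh/strexh loop instead of __bad_xchg(). */
	static inline u16 xchg_u16(u16 *p, u16 newval)
	{
		return xchg(p, newval);
	}

Before this change the size-2 case fell through to __bad_xchg(), so generic code built around a half-word xchg (such as the qspinlock tail handling mentioned above) could not work on ARM.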
Signed-off-by: Sarbojit Ganguly Signed-off-by: Russell King diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 916a274..97882f9 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h @@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { #if __LINUX_ARM_ARCH__ >= 6 +#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */ case 1: asm volatile("@ __xchg1\n" "1: ldrexb %0, [%3]\n" @@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size : "r" (x), "r" (ptr) : "memory", "cc"); break; + case 2: + asm volatile("@ __xchg2\n" + "1: ldrexh %0, [%3]\n" + " strexh %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#endif case 4: asm volatile("@ __xchg4\n" "1: ldrex %0, [%3]\n" -- cgit v0.10.2 From 8a603f91cc4848ab1a0458bc065aa9f64322e123 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Fri, 16 Oct 2015 22:19:06 +0100 Subject: ARM: 8445/1: fix vdsomunge not to depend on glibc specific byteswap.h If the host toolchain is not glibc based then the arm kernel build fails with HOSTCC arch/arm/vdso/vdsomunge arch/arm/vdso/vdsomunge.c:48:22: fatal error: byteswap.h: No such file or directory Observed: with omap2plus_defconfig and compile on Mac OS X with arm ELF cross-compiler. Reason: byteswap.h is a glibc only header. Solution: replace by private byte-swapping macros (taken from arch/mips/boot/elf2ecoff.c and kindly improved by Russell King) Tested to compile on Mac OS X 10.9.5 host. Signed-off-by: H. Nikolaus Schaller Signed-off-by: Nathan Lynch Signed-off-by: Russell King diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c index aedec81..0cebd98 100644 --- a/arch/arm/vdso/vdsomunge.c +++ b/arch/arm/vdso/vdsomunge.c @@ -45,7 +45,6 @@ * it does. */ -#include #include #include #include @@ -59,6 +58,16 @@ #include #include +#define swab16(x) \ + ((((x) & 0x00ff) << 8) | \ + (((x) & 0xff00) >> 8)) + +#define swab32(x) \ + ((((x) & 0x000000ff) << 24) | \ + (((x) & 0x0000ff00) << 8) | \ + (((x) & 0x00ff0000) >> 8) | \ + (((x) & 0xff000000) << 24)) + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define HOST_ORDER ELFDATA2LSB #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ @@ -104,17 +113,17 @@ static void cleanup(void) static Elf32_Word read_elf_word(Elf32_Word word, bool swap) { - return swap ? bswap_32(word) : word; + return swap ? swab32(word) : word; } static Elf32_Half read_elf_half(Elf32_Half half, bool swap) { - return swap ? bswap_16(half) : half; + return swap ? swab16(half) : half; } static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap) { - *dst = swap ? bswap_32(val) : val; + *dst = swap ? 
swab32(val) : val; } int main(int argc, char **argv) -- cgit v0.10.2 From c8d46ece44458a2088896d6fcae123a72bdfd429 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 19 Oct 2015 09:17:40 +0100 Subject: ARM: 8446/1: amba: Remove unused callbacks for legacy system PM Signed-off-by: Ulf Hansson Signed-off-by: Russell King diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 50fc668..9006c4e 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -41,8 +41,6 @@ struct amba_driver { int (*probe)(struct amba_device *, const struct amba_id *); int (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); - int (*suspend)(struct amba_device *, pm_message_t); - int (*resume)(struct amba_device *); const struct amba_id *id_table; }; -- cgit v0.10.2 From 9254970cbbf542a0085e491810f0144a27885702 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Mon, 19 Oct 2015 13:38:09 +0100 Subject: ARM: 8447/1: catch pending imprecise abort on unmask Install a non-faulting handler just before unmasking imprecise aborts and switch back to the regular one after unmasking is done. This catches any pending imprecise abort that the firmware/bootloader may have left behind that would normally crash the kernel at that point. As there are apparently a lot of bootloaders out there that do such a thing, it makes sense to handle it in the common startup code. Signed-off-by: Lucas Stach Tested-by: Tyler Baker Signed-off-by: Russell King diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 0d629b8..daafcf1 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) arm_notify_die("", regs, &info, ifsr, 0); } +/* + * Abort handler to be used only during first unmasking of asynchronous aborts + * on the boot CPU. This makes sure that the machine will not die if the + * firmware/bootloader left an imprecise abort pending for us to trip over. 
+ */ +static int __init early_abort_handler(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during " + "first unmask, this is most likely caused by a " + "firmware/bootloader bug.\n", fsr); + + return 0; +} + +void __init early_abt_enable(void) +{ + fsr_info[22].fn = early_abort_handler; + local_abt_enable(); + fsr_info[22].fn = do_bad; +} + #ifndef CONFIG_ARM_LPAE static int __init exceptions_init(void) { diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index cf08bdf..05ec5e0 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr) void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); unsigned long search_exception_table(unsigned long addr); +void early_abt_enable(void); #endif /* __ARCH_ARM_FAULT_H */ diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index f65a6f3..4867f5d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -38,6 +38,7 @@ #include #include +#include "fault.h" #include "mm.h" #include "tcm.h" @@ -1365,7 +1366,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) flush_cache_all(); /* Enable asynchronous aborts */ - local_abt_enable(); + early_abt_enable(); } static void __init kmap_init(void) -- cgit v0.10.2 From 625faa6a720d26fc0db9e20b48dc0dfe4c8d8ddf Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 20 Oct 2015 11:49:44 +0100 Subject: clkdev: fix clk_add_alias() with a NULL alias device name clk_add_alias() was not correctly handling the case where alias_dev_name was NULL: rather than producing an entry with a NULL dev_id pointer, it would produce a device name of (null). Fix this. Cc: Fixes: 2568999835d7 ("clkdev: add clkdev_create() helper") Reported-by: Aaro Koskinen Tested-by: Aaro Koskinen Signed-off-by: Russell King diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index c0eaf09..779b6ff 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name, if (IS_ERR(r)) return PTR_ERR(r); - l = clkdev_create(r, alias, "%s", alias_dev_name); + l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL, + alias_dev_name); clk_put(r); return l ? 0 : -ENODEV; -- cgit v0.10.2 From a4283e41bbd80c629770f954cc5d013eb8851229 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 10 Oct 2015 23:41:42 +0200 Subject: arm: add missing of_node_put for_each_child_of_node performs an of_node_get on each iteration, so a break out of the loop requires an of_node_put. The of_node_put is duplicated in front of each error return, because the function contains a later error return that is beyond the end of the for_each_child_of_node and thus doesn't need of_node_put. The semantic patch that fixes this problem is as follows (http://coccinelle.lip6.fr): // @@ expression root,e; local idexpression child; iterator name for_each_child_of_node; @@ for_each_child_of_node(root, child) { ... when != of_node_put(child) when != e = child ( return child; | + of_node_put(child); ? return ...; ) ... } @@ expression root,e; local idexpression child; @@ for_each_child_of_node(root, child) { ... when != of_node_put(child) when != e = child + of_node_put(child); ? break; ... } ... when != child // Additionally, concatenated a string in an affected line to avoid introducing a checkpatch warning. 
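The general shape of the fix, as a hypothetical example rather than a hunk from the patch ('parent' and the availability test are placeholders):

	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (!of_device_is_available(child)) {
			of_node_put(child);	/* balance the get taken by the iterator */
			break;			/* any early exit must drop the reference */
		}
		/* ... use child; on a normal iteration the iterator itself
		 * drops this reference when it advances to the next child ... */
	}

A loop that runs to completion needs no extra of_node_put(), because the final iterator step releases the last reference; only break/return paths leak, which is what the semantic patch above hunts for.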
Signed-off-by: Julia Lawall Signed-off-by: Russell King diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 11c54de..65addcb 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void) if (of_property_read_u32(cpu, "reg", &hwid)) { pr_debug(" * %s missing reg property\n", cpu->full_name); + of_node_put(cpu); return; } @@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void) * 8 MSBs must be set to 0 in the DT since the reg property * defines the MPIDR[23:0]. */ - if (hwid & ~MPIDR_HWID_BITMASK) + if (hwid & ~MPIDR_HWID_BITMASK) { + of_node_put(cpu); return; + } /* * Duplicate MPIDRs are a recipe for disaster. @@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void) * to avoid matching valid MPIDR[23:0] values. */ for (j = 0; j < cpuidx; j++) - if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg " - "properties in the DT\n")) + if (WARN(tmp_map[j] == hwid, + "Duplicate /cpu reg properties in the DT\n")) { + of_node_put(cpu); return; + } /* * Build a stashed array of MPIDR values. Numbering scheme @@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void) "max cores %u, capping them\n", cpuidx, nr_cpu_ids)) { cpuidx = nr_cpu_ids; + of_node_put(cpu); break; } -- cgit v0.10.2 From 38850d786a799c3ff2de0dc1980902c3263698dc Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Wed, 28 Oct 2015 19:00:26 +0100 Subject: ARM: 8449/1: fix bug in vdsomunge swab32 macro Commit 8a603f91cc48 ("ARM: 8445/1: fix vdsomunge not to depend on glibc specific byteswap.h") unfortunately introduced a bug created but not found during discussion and patch simplification. Reported-by: Efraim Yawitz Signed-off-by: H. Nikolaus Schaller Fixes: 8a603f91cc48 ("ARM: 8445/1: fix vdsomunge not to depend on glibc specific byteswap.h") Signed-off-by: Nathan Lynch Signed-off-by: Russell King diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c index 0cebd98..f645527 100644 --- a/arch/arm/vdso/vdsomunge.c +++ b/arch/arm/vdso/vdsomunge.c @@ -66,7 +66,7 @@ ((((x) & 0x000000ff) << 24) | \ (((x) & 0x0000ff00) << 8) | \ (((x) & 0x00ff0000) >> 8) | \ - (((x) & 0xff000000) << 24)) + (((x) & 0xff000000) >> 24)) #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define HOST_ORDER ELFDATA2LSB -- cgit v0.10.2
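As a closing sanity check of that last fix, take x = 0x12345678 with x an unsigned 32-bit Elf32_Word: under the buggy macro the last term, ((x & 0xff000000) << 24), shifts the top byte completely out of the 32-bit result, giving 0x78563400; with the corrected >> 24 it contributes 0x00000012 and swab32(0x12345678) == 0x78563412 as intended. A tiny host-side test along these lines (illustrative, not part of the series):

	#include <assert.h>
	#include <stdint.h>

	#define swab32(x) \
		((((x) & 0x000000ff) << 24) | \
		 (((x) & 0x0000ff00) <<  8) | \
		 (((x) & 0x00ff0000) >>  8) | \
		 (((x) & 0xff000000) >> 24))

	int main(void)
	{
		assert(swab32(UINT32_C(0x12345678)) == UINT32_C(0x78563412));
		return 0;
	}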