From 7c15d9bb8231f998ae7dc0b72415f5215459f7fb Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 19 Jul 2016 15:00:04 -0700 Subject: mm: Add is_migrate_cma_page Code such as hardened user copy[1] needs a way to tell if a page is CMA or not. Add is_migrate_cma_page in a similar way to is_migrate_isolate_page. [1]http://article.gmane.org/gmane.linux.kernel.mm/155238 Signed-off-by: Laura Abbott Signed-off-by: Kees Cook diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 02069c2..c8478b2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES]; #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) +# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) #else # define is_migrate_cma(migratetype) false +# define is_migrate_cma_page(_page) false #endif #define for_each_migratetype_order(order, type) \ -- cgit v0.10.2 From 0f60a8efe4005ab5e65ce000724b04d4ca04a199 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 12 Jul 2016 16:19:48 -0700 Subject: mm: Implement stack frame object validation This creates per-architecture function arch_within_stack_frames() that should validate if a given object is contained by a kernel stack frame. Initial implementation is on x86. This is based on code from PaX. Signed-off-by: Kees Cook diff --git a/arch/Kconfig b/arch/Kconfig index 1599629..ef86cde 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -424,6 +424,15 @@ config CC_STACKPROTECTOR_STRONG endchoice +config HAVE_ARCH_WITHIN_STACK_FRAMES + bool + help + An architecture should select this if it can walk the kernel stack + frames to determine if an object is part of either the arguments + or local variables (i.e. that it excludes saved return addresses, + and similar) by implementing an inline arch_within_stack_frames(), + which is used by CONFIG_HARDENED_USERCOPY. + config HAVE_CONTEXT_TRACKING bool help diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d9a94da..a2865dd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -91,6 +91,7 @@ config X86 select HAVE_ARCH_SOFT_DIRTY if X86_64 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_WITHIN_STACK_FRAMES select HAVE_EBPF_JIT if X86_64 select HAVE_CC_STACKPROTECTOR select HAVE_CMPXCHG_DOUBLE diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 30c133a..ab386f1 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -180,6 +180,50 @@ static inline unsigned long current_stack_pointer(void) return sp; } +/* + * Walks up the stack frames to make sure that the specified object is + * entirely contained by a single stack frame. 
+ * + * Returns: + * 1 if within a frame + * -1 if placed across a frame boundary (or outside stack) + * 0 unable to determine (no frame pointers, etc) + */ +static inline int arch_within_stack_frames(const void * const stack, + const void * const stackend, + const void *obj, unsigned long len) +{ +#if defined(CONFIG_FRAME_POINTER) + const void *frame = NULL; + const void *oldframe; + + oldframe = __builtin_frame_address(1); + if (oldframe) + frame = __builtin_frame_address(2); + /* + * low ----------------------------------------------> high + * [saved bp][saved ip][args][local vars][saved bp][saved ip] + * ^----------------^ + * allow copies only within here + */ + while (stack <= frame && frame < stackend) { + /* + * If obj + len extends past the last frame, this + * check won't pass and the next frame will be 0, + * causing us to bail out and correctly report + * the copy as invalid. + */ + if (obj + len <= frame) + return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1; + oldframe = frame; + frame = *(const void * const *)frame; + } + return -1; +#else + return 0; +#endif +} + #else /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_64 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index b4c2a48..3d5c80b 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -146,6 +146,15 @@ static inline bool test_and_clear_restore_sigmask(void) #error "no set_restore_sigmask() provided and default one won't work" #endif +#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES +static inline int arch_within_stack_frames(const void * const stack, + const void * const stackend, + const void *obj, unsigned long len) +{ + return 0; +} +#endif + #endif /* __KERNEL__ */ #endif /* _LINUX_THREAD_INFO_H */ -- cgit v0.10.2 From f5509cc18daa7f82bcc553be70df2117c8eedc16 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 7 Jun 2016 11:05:33 -0700 Subject: mm: Hardened usercopy This is the start of porting PAX_USERCOPY into the mainline kernel. This is the first set of features, controlled by CONFIG_HARDENED_USERCOPY. The work is based on code by PaX Team and Brad Spengler, and an earlier port from Casey Schaufler. Additional non-slab page tests are from Rik van Riel. 
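The checks are reached through a single hook, check_object_size(), which the per-architecture patches later in this series call right before their low-level user copy routines. Below is a minimal sketch of that call pattern; arch_low_level_copy_from_user() is only a placeholder for whatever raw copy primitive a given architecture uses, and the whole wrapper reduces to just that raw copy when CONFIG_HARDENED_USERCOPY is not selected, because check_object_size() is then an empty static inline.

static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* Validate the kernel-side object before touching user memory. */
	check_object_size(to, n, false);	/* false: data is copied into the kernel */
	return arch_low_level_copy_from_user(to, from, n);	/* placeholder primitive */
}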
This patch contains the logic for validating several conditions when performing copy_to_user() and copy_from_user() on the kernel object being copied to/from: - address range doesn't wrap around - address range isn't NULL or zero-allocated (with a non-zero copy size) - if on the slab allocator: - object size must be less than or equal to copy size (when check is implemented in the allocator, which appear in subsequent patches) - otherwise, object must not span page allocations (excepting Reserved and CMA ranges) - if on the stack - object must not extend before/after the current process stack - object must be contained by a valid stack frame (when there is arch/build support for identifying stack frames) - object must not overlap with kernel text Signed-off-by: Kees Cook Tested-by: Valdis Kletnieks Tested-by: Michael Ellerman diff --git a/include/linux/slab.h b/include/linux/slab.h index aeb3e6d..96a16a3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,6 +155,18 @@ void kfree(const void *); void kzfree(const void *); size_t ksize(const void *); +#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR +const char *__check_heap_object(const void *ptr, unsigned long n, + struct page *page); +#else +static inline const char *__check_heap_object(const void *ptr, + unsigned long n, + struct page *page) +{ + return NULL; +} +#endif + /* * Some archs want to perform DMA into kmalloc caches and need a guaranteed * alignment larger than the alignment of a 64-bit integer. diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 3d5c80b..f24b99e 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -155,6 +155,21 @@ static inline int arch_within_stack_frames(const void * const stack, } #endif +#ifdef CONFIG_HARDENED_USERCOPY +extern void __check_object_size(const void *ptr, unsigned long n, + bool to_user); + +static inline void check_object_size(const void *ptr, unsigned long n, + bool to_user) +{ + __check_object_size(ptr, n, to_user); +} +#else +static inline void check_object_size(const void *ptr, unsigned long n, + bool to_user) +{ } +#endif /* CONFIG_HARDENED_USERCOPY */ + #endif /* __KERNEL__ */ #endif /* _LINUX_THREAD_INFO_H */ diff --git a/mm/Makefile b/mm/Makefile index 78c6f7d..32d3724 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n KCOV_INSTRUMENT_mmzone.o := n KCOV_INSTRUMENT_vmstat.o := n +# Since __builtin_frame_address does work as used, disable the warning. +CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address) + mmu-y := nommu.o mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ @@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o +obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o diff --git a/mm/usercopy.c b/mm/usercopy.c new file mode 100644 index 0000000..8ebae91 --- /dev/null +++ b/mm/usercopy.c @@ -0,0 +1,268 @@ +/* + * This implements the various checks for CONFIG_HARDENED_USERCOPY*, + * which are designed to protect kernel memory from needless exposure + * and overwrite under many unintended conditions. This code is based + * on PAX_USERCOPY, which is: + * + * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source + * Security Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +enum { + BAD_STACK = -1, + NOT_STACK = 0, + GOOD_FRAME, + GOOD_STACK, +}; + +/* + * Checks if a given pointer and length is contained by the current + * stack frame (if possible). + * + * Returns: + * NOT_STACK: not at all on the stack + * GOOD_FRAME: fully within a valid stack frame + * GOOD_STACK: fully on the stack (when can't do frame-checking) + * BAD_STACK: error condition (invalid stack position or bad stack frame) + */ +static noinline int check_stack_object(const void *obj, unsigned long len) +{ + const void * const stack = task_stack_page(current); + const void * const stackend = stack + THREAD_SIZE; + int ret; + + /* Object is not on the stack at all. */ + if (obj + len <= stack || stackend <= obj) + return NOT_STACK; + + /* + * Reject: object partially overlaps the stack (passing the + * the check above means at least one end is within the stack, + * so if this check fails, the other end is outside the stack). + */ + if (obj < stack || stackend < obj + len) + return BAD_STACK; + + /* Check if object is safely within a valid frame. */ + ret = arch_within_stack_frames(stack, stackend, obj, len); + if (ret) + return ret; + + return GOOD_STACK; +} + +static void report_usercopy(const void *ptr, unsigned long len, + bool to_user, const char *type) +{ + pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", + to_user ? "exposure" : "overwrite", + to_user ? "from" : "to", ptr, type ? : "unknown", len); + /* + * For greater effect, it would be nice to do do_group_exit(), + * but BUG() actually hooks all the lock-breaking and per-arch + * Oops code, so that is used here instead. + */ + BUG(); +} + +/* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */ +static bool overlaps(const void *ptr, unsigned long n, unsigned long low, + unsigned long high) +{ + unsigned long check_low = (uintptr_t)ptr; + unsigned long check_high = check_low + n; + + /* Does not overlap if entirely above or entirely below. */ + if (check_low >= high || check_high < low) + return false; + + return true; +} + +/* Is this address range in the kernel text area? */ +static inline const char *check_kernel_text_object(const void *ptr, + unsigned long n) +{ + unsigned long textlow = (unsigned long)_stext; + unsigned long texthigh = (unsigned long)_etext; + unsigned long textlow_linear, texthigh_linear; + + if (overlaps(ptr, n, textlow, texthigh)) + return ""; + + /* + * Some architectures have virtual memory mappings with a secondary + * mapping of the kernel text, i.e. there is more than one virtual + * kernel address that points to the kernel image. It is usually + * when there is a separate linear physical memory mapping, in that + * __pa() is not just the reverse of __va(). This can be detected + * and checked: + */ + textlow_linear = (unsigned long)__va(__pa(textlow)); + /* No different mapping: we're done. */ + if (textlow_linear == textlow) + return NULL; + + /* Check the secondary mapping... */ + texthigh_linear = (unsigned long)__va(__pa(texthigh)); + if (overlaps(ptr, n, textlow_linear, texthigh_linear)) + return ""; + + return NULL; +} + +static inline const char *check_bogus_address(const void *ptr, unsigned long n) +{ + /* Reject if object wraps past end of memory. 
*/ + if (ptr + n < ptr) + return ""; + + /* Reject if NULL or ZERO-allocation. */ + if (ZERO_OR_NULL_PTR(ptr)) + return ""; + + return NULL; +} + +static inline const char *check_heap_object(const void *ptr, unsigned long n, + bool to_user) +{ + struct page *page, *endpage; + const void *end = ptr + n - 1; + bool is_reserved, is_cma; + + /* + * Some architectures (arm64) return true for virt_addr_valid() on + * vmalloced addresses. Work around this by checking for vmalloc + * first. + */ + if (is_vmalloc_addr(ptr)) + return NULL; + + if (!virt_addr_valid(ptr)) + return NULL; + + page = virt_to_head_page(ptr); + + /* Check slab allocator for flags and size. */ + if (PageSlab(page)) + return __check_heap_object(ptr, n, page); + + /* + * Sometimes the kernel data regions are not marked Reserved (see + * check below). And sometimes [_sdata,_edata) does not cover + * rodata and/or bss, so check each range explicitly. + */ + + /* Allow reads of kernel rodata region (if not marked as Reserved). */ + if (ptr >= (const void *)__start_rodata && + end <= (const void *)__end_rodata) { + if (!to_user) + return ""; + return NULL; + } + + /* Allow kernel data region (if not marked as Reserved). */ + if (ptr >= (const void *)_sdata && end <= (const void *)_edata) + return NULL; + + /* Allow kernel bss region (if not marked as Reserved). */ + if (ptr >= (const void *)__bss_start && + end <= (const void *)__bss_stop) + return NULL; + + /* Is the object wholly within one base page? */ + if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) == + ((unsigned long)end & (unsigned long)PAGE_MASK))) + return NULL; + + /* Allow if start and end are inside the same compound page. */ + endpage = virt_to_head_page(end); + if (likely(endpage == page)) + return NULL; + + /* + * Reject if range is entirely either Reserved (i.e. special or + * device memory), or CMA. Otherwise, reject since the object spans + * several independently allocated pages. + */ + is_reserved = PageReserved(page); + is_cma = is_migrate_cma_page(page); + if (!is_reserved && !is_cma) + goto reject; + + for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { + page = virt_to_head_page(ptr); + if (is_reserved && !PageReserved(page)) + goto reject; + if (is_cma && !is_migrate_cma_page(page)) + goto reject; + } + + return NULL; + +reject: + return ""; +} + +/* + * Validates that the given object is: + * - not bogus address + * - known-safe heap or stack object + * - not in kernel text + */ +void __check_object_size(const void *ptr, unsigned long n, bool to_user) +{ + const char *err; + + /* Skip all tests if size is zero. */ + if (!n) + return; + + /* Check for invalid addresses. */ + err = check_bogus_address(ptr, n); + if (err) + goto report; + + /* Check for bad heap object. */ + err = check_heap_object(ptr, n, to_user); + if (err) + goto report; + + /* Check for bad stack object. */ + switch (check_stack_object(ptr, n)) { + case NOT_STACK: + /* Object is not touching the current process stack. */ + break; + case GOOD_FRAME: + case GOOD_STACK: + /* + * Object is either in the correct frame (when it + * is possible to check) or just generally on the + * process stack (when frame checking not available). + */ + return; + default: + err = ""; + goto report; + } + + /* Check for object in kernel to avoid text exposure. 
*/ + err = check_kernel_text_object(ptr, n); + if (!err) + return; + +report: + report_usercopy(ptr, n, to_user, err); +} +EXPORT_SYMBOL(__check_object_size); diff --git a/security/Kconfig b/security/Kconfig index 176758c..df28f2b 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR this low address space will need the permission specific to the systems running LSM. +config HAVE_HARDENED_USERCOPY_ALLOCATOR + bool + help + The heap allocator implements __check_heap_object() for + validating memory ranges against heap object sizes in + support of CONFIG_HARDENED_USERCOPY. + +config HAVE_ARCH_HARDENED_USERCOPY + bool + help + The architecture supports CONFIG_HARDENED_USERCOPY by + calling check_object_size() just before performing the + userspace copies in the low level implementation of + copy_to_user() and copy_from_user(). + +config HARDENED_USERCOPY + bool "Harden memory copies between kernel and userspace" + depends on HAVE_ARCH_HARDENED_USERCOPY + select BUG + help + This option checks for obviously wrong memory regions when + copying memory to/from the kernel (via copy_to_user() and + copy_from_user() functions) by rejecting memory ranges that + are larger than the specified heap object, span multiple + separately allocates pages, are not on the process stack, + or are part of the kernel text. This kills entire classes + of heap overflow exploits and similar kernel memory exposures. + source security/selinux/Kconfig source security/smack/Kconfig source security/tomoyo/Kconfig -- cgit v0.10.2 From 5b710f34e194c6b7710f69fdb5d798fdf35b98c1 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:04:01 -0700 Subject: x86/uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on x86. This is done both in copy_*_user() and __copy_*_user() because copy_*_user() actually calls down to _copy_*_user() and not __copy_*_user(). Based on code from PaX and grsecurity. Signed-off-by: Kees Cook Tested-by: Valdis Kletnieks diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a2865dd..9640942 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -80,6 +80,7 @@ config X86 select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_AOUT if X86_32 select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HARDENED_USERCOPY select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 2982387..d3312f0 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -742,9 +742,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) * case, and do only runtime checking for non-constant sizes. */ - if (likely(sz < 0 || sz >= n)) + if (likely(sz < 0 || sz >= n)) { + check_object_size(to, n, false); n = _copy_from_user(to, from, n); - else if(__builtin_constant_p(n)) + } else if (__builtin_constant_p(n)) copy_from_user_overflow(); else __copy_from_user_overflow(sz, n); @@ -762,9 +763,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n) might_fault(); /* See the comment in copy_from_user() above. 
*/ - if (likely(sz < 0 || sz >= n)) + if (likely(sz < 0 || sz >= n)) { + check_object_size(from, n, true); n = _copy_to_user(to, from, n); - else if(__builtin_constant_p(n)) + } else if (__builtin_constant_p(n)) copy_to_user_overflow(); else __copy_to_user_overflow(sz, n); diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 4b32da24..7d3bdd1 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero static __always_inline unsigned long __must_check __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { + check_object_size(from, n, true); return __copy_to_user_ll(to, from, n); } @@ -95,6 +96,7 @@ static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); + check_object_size(to, n, false); if (__builtin_constant_p(n)) { unsigned long ret; diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 2eac2aa..673059a 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) { int ret = 0; + check_object_size(dst, size, false); if (!__builtin_constant_p(size)) return copy_user_generic(dst, (__force void *)src, size); switch (size) { @@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) { int ret = 0; + check_object_size(src, size, true); if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst, src, size); switch (size) { -- cgit v0.10.2 From dfd45b6103c973bfcea2341d89e36faf947dbc33 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:06:53 -0700 Subject: ARM: uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on arm. Based on code from PaX and grsecurity. 
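As on x86 above, enabling the hook is free for kernels built without the feature: check_object_size() falls back to the empty inline stub added to include/linux/thread_info.h earlier in this series, so the compiler drops the call entirely. Condensed from that earlier hunk:

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);

static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	__check_object_size(ptr, n, to_user);	/* real validation lives in mm/usercopy.c */
}
#else
static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
{ }						/* compiles away when the option is off */
#endif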
Signed-off-by: Kees Cook diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 90542db..f56b29b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -35,6 +35,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 + select HAVE_ARCH_HARDENED_USERCOPY select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_MMAP_RND_BITS if MMU diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 35c9db8..7fb5919 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -496,7 +496,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n); static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) { - unsigned int __ua_flags = uaccess_save_and_enable(); + unsigned int __ua_flags; + + check_object_size(to, n, false); + __ua_flags = uaccess_save_and_enable(); n = arm_copy_from_user(to, from, n); uaccess_restore(__ua_flags); return n; @@ -511,11 +514,15 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) { #ifndef CONFIG_UACCESS_WITH_MEMCPY - unsigned int __ua_flags = uaccess_save_and_enable(); + unsigned int __ua_flags; + + check_object_size(from, n, true); + __ua_flags = uaccess_save_and_enable(); n = arm_copy_to_user(to, from, n); uaccess_restore(__ua_flags); return n; #else + check_object_size(from, n, true); return arm_copy_to_user(to, from, n); #endif } -- cgit v0.10.2 From faf5b63e294151d6ac24ca6906d6f221bd3496cd Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:59:42 -0700 Subject: arm64/uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on arm64. As done by KASAN in -next, renames the low-level functions to __arch_copy_*_user() so a static inline can do additional work before the copy. 
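The rename is needed because the old names are extern symbols implemented in assembly; the same name cannot also be given to a static inline wrapper, so the assembly entry points become __arch_copy_*_user() and thin inlines take over the old names. A condensed sketch of the resulting shape (the full version is in the hunks below):

extern unsigned long __must_check
__arch_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);	/* the "additional work" before the copy */
	return __arch_copy_from_user(to, from, n);	/* unchanged assembly routine */
}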
Signed-off-by: Kees Cook diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5a0a691..9cdb232 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -51,6 +51,7 @@ config ARM64 select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE + select HAVE_ARCH_HARDENED_USERCOPY select HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 9e397a5..92848b0 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -256,24 +256,39 @@ do { \ -EFAULT; \ }) -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); +extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) +{ + check_object_size(to, n, false); + return __arch_copy_from_user(to, from, n); +} + +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) +{ + check_object_size(from, n, true); + return __arch_copy_to_user(to, from, n); +} + static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else /* security hole - plug it */ + if (access_ok(VERIFY_READ, from, n)) { + check_object_size(to, n, false); + n = __arch_copy_from_user(to, from, n); + } else /* security hole - plug it */ memset(to, 0, n); return n; } static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); + if (access_ok(VERIFY_WRITE, to, n)) { + check_object_size(from, n, true); + n = __arch_copy_to_user(to, from, n); + } return n; } diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 678f30b0..2dc4440 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); /* user mem (segment) */ -EXPORT_SYMBOL(__copy_from_user); -EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(__arch_copy_from_user); +EXPORT_SYMBOL(__arch_copy_to_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__copy_in_user); diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 17e8306..0b90497 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -66,7 +66,7 @@ .endm end .req x5 -ENTRY(__copy_from_user) +ENTRY(__arch_copy_from_user) ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ CONFIG_ARM64_PAN) add end, x0, x2 @@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ CONFIG_ARM64_PAN) mov x0, #0 // Nothing to copy ret -ENDPROC(__copy_from_user) +ENDPROC(__arch_copy_from_user) .section .fixup,"ax" .align 2 diff --git 
a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 21faae6..7a7efe2 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -65,7 +65,7 @@ .endm end .req x5 -ENTRY(__copy_to_user) +ENTRY(__arch_copy_to_user) ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ CONFIG_ARM64_PAN) add end, x0, x2 @@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ CONFIG_ARM64_PAN) mov x0, #0 ret -ENDPROC(__copy_to_user) +ENDPROC(__arch_copy_to_user) .section .fixup,"ax" .align 2 -- cgit v0.10.2 From 73d35887e24da77e8d1321b2e92bd9b9128e2fc2 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:09:50 -0700 Subject: ia64/uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on ia64. Based on code from PaX and grsecurity. Signed-off-by: Kees Cook diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index e109ee9..7451f1d 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -53,6 +53,7 @@ config IA64 select MODULES_USE_ELF_RELA select ARCH_USE_CMPXCHG_LOCKREF select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HARDENED_USERCOPY default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 2189d5d..465c709 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use static inline unsigned long __copy_to_user (void __user *to, const void *from, unsigned long count) { + if (!__builtin_constant_p(count)) + check_object_size(from, count, true); + return __copy_user(to, (__force void __user *) from, count); } static inline unsigned long __copy_from_user (void *to, const void __user *from, unsigned long count) { + if (!__builtin_constant_p(count)) + check_object_size(to, count, false); + return __copy_user((__force void __user *) to, from, count); } @@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) const void *__cu_from = (from); \ long __cu_len = (n); \ \ - if (__access_ok(__cu_to, __cu_len, get_fs())) \ - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ + if (__access_ok(__cu_to, __cu_len, get_fs())) { \ + if (!__builtin_constant_p(n)) \ + check_object_size(__cu_from, __cu_len, true); \ + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ + } \ __cu_len; \ }) @@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) long __cu_len = (n); \ \ __chk_user_ptr(__cu_from); \ - if (__access_ok(__cu_from, __cu_len, get_fs())) \ + if (__access_ok(__cu_from, __cu_len, get_fs())) { \ + if (!__builtin_constant_p(n)) \ + check_object_size(__cu_to, __cu_len, false); \ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ + } \ __cu_len; \ }) -- cgit v0.10.2 From 1d3c1324746fed0e34a5b94d3ed303e7521ed603 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:10:01 -0700 Subject: powerpc/uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on powerpc. Based on code from PaX and grsecurity. 
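Like the ia64 patch above (and the sparc one below), the powerpc hooks are wrapped in !__builtin_constant_p(n). The changelog does not spell out the rationale, but a length that is a compile-time constant cannot be influenced at runtime, so skipping the check there avoids overhead on small fixed-size copies without giving up protection where it matters. The builtin simply reports whether the compiler can prove the expression constant; a standalone demonstration (illustration only, not kernel code):

#include <stdio.h>

#define report(expr) \
	printf("%-16s constant-folded: %d\n", #expr, __builtin_constant_p(expr))

int main(int argc, char **argv)
{
	unsigned long runtime_len = (unsigned long)argc;	/* unknown until runtime */

	report(sizeof(long));	/* 1: the runtime check would be skipped */
	report(runtime_len);	/* 0: check_object_size() would run      */
	return 0;
}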
Signed-off-by: Kees Cook Tested-by: Michael Ellerman diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 0a9d439..0ac48a8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -164,6 +164,7 @@ config PPC select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_ARCH_HARDENED_USERCOPY config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index b7c20f0..c1dc6c1 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to, { unsigned long over; - if (access_ok(VERIFY_READ, from, n)) + if (access_ok(VERIFY_READ, from, n)) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); return __copy_tofrom_user((__force void __user *)to, from, n); + } if ((unsigned long)from < TASK_SIZE) { over = (unsigned long)from + n - TASK_SIZE; + if (!__builtin_constant_p(n - over)) + check_object_size(to, n - over, false); return __copy_tofrom_user((__force void __user *)to, from, n - over) + over; } @@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to, { unsigned long over; - if (access_ok(VERIFY_WRITE, to, n)) + if (access_ok(VERIFY_WRITE, to, n)) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_tofrom_user(to, (__force void __user *)from, n); + } if ((unsigned long)to < TASK_SIZE) { over = (unsigned long)to + n - TASK_SIZE; + if (!__builtin_constant_p(n)) + check_object_size(from, n - over, true); return __copy_tofrom_user(to, (__force void __user *)from, n - over) + over; } @@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, if (ret == 0) return 0; } + + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + return __copy_tofrom_user((__force void __user *)to, from, n); } @@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, if (ret == 0) return 0; } + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_tofrom_user(to, (__force const void __user *)from, n); } -- cgit v0.10.2 From 9d9208a15800f9f06f102f9aac1e8b323c3b8575 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:10:13 -0700 Subject: sparc/uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on sparc. Based on code from PaX and grsecurity. 
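In every one of these call sites the final argument records the direction of the copy, and the core checker uses it for both policy and reporting: only the to_user direction is allowed to read the kernel rodata region, and a failed check is reported as an "overwrite" attempt for copies into the kernel versus an "exposure" for copies out of it (see check_heap_object() and report_usercopy() earlier in the series). In short:

check_object_size(to,   n, false);	/* copy_from_user(): kernel object is the write target */
check_object_size(from, n, true);	/* copy_to_user():   kernel object may be exposed      */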
Signed-off-by: Kees Cook diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 546293d..59b0960 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -43,6 +43,7 @@ config SPARC select OLD_SIGSUSPEND select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS + select HAVE_ARCH_HARDENED_USERCOPY config SPARC32 def_bool !64BIT diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 57aca27..341a5a1 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -248,22 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { - if (n && __access_ok((unsigned long) to, n)) + if (n && __access_ok((unsigned long) to, n)) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); - else + } else return n; } static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); } static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - if (n && __access_ok((unsigned long) from, n)) + if (n && __access_ok((unsigned long) from, n)) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); return __copy_user((__force void __user *) to, from, n); - else + } else return n; } diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e9a51d6..8bda94f 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -210,8 +210,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from, static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long ret = ___copy_from_user(to, from, size); + unsigned long ret; + if (!__builtin_constant_p(size)) + check_object_size(to, size, false); + + ret = ___copy_from_user(to, from, size); if (unlikely(ret)) ret = copy_from_user_fixup(to, from, size); @@ -227,8 +231,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from, static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long size) { - unsigned long ret = ___copy_to_user(to, from, size); + unsigned long ret; + if (!__builtin_constant_p(size)) + check_object_size(from, size, true); + ret = ___copy_to_user(to, from, size); if (unlikely(ret)) ret = copy_to_user_fixup(to, from, size); return ret; -- cgit v0.10.2 From 97433ea4fda62349bfa42089455593cbcb57e06c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 7 Jul 2016 11:38:39 -0700 Subject: s390/uaccess: Enable hardened usercopy Enables CONFIG_HARDENED_USERCOPY checks on s390. 
Signed-off-by: Kees Cook diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index a8c2590..9f69431 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -122,6 +122,7 @@ config S390 select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_EARLY_PFN_TO_NID + select HAVE_ARCH_HARDENED_USERCOPY select HAVE_ARCH_JUMP_LABEL select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index ae4de55..6986c20 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { + check_object_size(to, n, false); if (static_branch_likely(&have_mvcos)) return copy_from_user_mvcos(to, from, n); return copy_from_user_mvcp(to, from, n); @@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { + check_object_size(from, n, true); if (static_branch_likely(&have_mvcos)) return copy_to_user_mvcos(to, from, n); return copy_to_user_mvcs(to, from, n); -- cgit v0.10.2 From 04385fc5e8fffed84425d909a783c0f0c587d847 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:20:59 -0700 Subject: mm: SLAB hardened usercopy support Under CONFIG_HARDENED_USERCOPY, this adds object size checking to the SLAB allocator to catch any copies that may span objects. Based on code from PaX and grsecurity. Signed-off-by: Kees Cook Tested-by: Valdis Kletnieks diff --git a/init/Kconfig b/init/Kconfig index c02d897..1312d7b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1758,6 +1758,7 @@ choice config SLAB bool "SLAB" + select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work well in all environments. It organizes cache hot objects in diff --git a/mm/slab.c b/mm/slab.c index cc8bbc1..5e2d5f3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4477,6 +4477,36 @@ static int __init slab_proc_init(void) module_init(slab_proc_init); #endif +#ifdef CONFIG_HARDENED_USERCOPY +/* + * Rejects objects that are incorrectly sized. + * + * Returns NULL if check passes, otherwise const char * to name of cache + * to indicate an error. + */ +const char *__check_heap_object(const void *ptr, unsigned long n, + struct page *page) +{ + struct kmem_cache *cachep; + unsigned int objnr; + unsigned long offset; + + /* Find and validate object. */ + cachep = page->slab_cache; + objnr = obj_to_index(cachep, page, (void *)ptr); + BUG_ON(objnr >= cachep->num); + + /* Find offset within object. */ + offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); + + /* Allow address range falling entirely within object size. */ + if (offset <= cachep->object_size && n <= cachep->object_size - offset) + return NULL; + + return cachep->name; +} +#endif /* CONFIG_HARDENED_USERCOPY */ + /** * ksize - get the actual amount of memory allocated for a given object * @objp: Pointer to the object -- cgit v0.10.2 From ed18adc1cdd00a5c55a20fbdaed4804660772281 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 15:24:05 -0700 Subject: mm: SLUB hardened usercopy support Under CONFIG_HARDENED_USERCOPY, this adds object size checking to the SLUB allocator to catch any copies that may span objects. Includes a redzone handling fix discovered by Michael Ellerman. 
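Both the SLAB check above and the SLUB check below accept a copy only if it falls entirely inside the usable object, and both spell the test as "offset <= size && n <= size - offset" rather than the more obvious "offset + n <= size": the subtraction form cannot wrap around for a huge n, while the addition form can. A standalone comparison (illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool fits_naive(unsigned long offset, unsigned long n, unsigned long size)
{
	return offset + n <= size;			/* offset + n may wrap */
}

static bool fits_safe(unsigned long offset, unsigned long n, unsigned long size)
{
	return offset <= size && n <= size - offset;	/* no overflow possible */
}

int main(void)
{
	unsigned long size = 128, offset = 64, n = (unsigned long)-32;	/* absurdly large n */

	/* Prints "naive: 1  safe: 0": only the safe form rejects the copy. */
	printf("naive: %d  safe: %d\n", fits_naive(offset, n, size),
	       fits_safe(offset, n, size));
	return 0;
}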
Based on code from PaX and grsecurity. Signed-off-by: Kees Cook Tested-by: Michael Ellerman Reviwed-by: Laura Abbott diff --git a/init/Kconfig b/init/Kconfig index 1312d7b..0c84706 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1766,6 +1766,7 @@ config SLAB config SLUB bool "SLUB (Unqueued Allocator)" + select HAVE_HARDENED_USERCOPY_ALLOCATOR help SLUB is a slab allocator that minimizes cache line usage instead of managing queues of cached objects (SLAB approach). diff --git a/mm/slub.c b/mm/slub.c index 825ff45..256a8ef 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3614,6 +3614,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) EXPORT_SYMBOL(__kmalloc_node); #endif +#ifdef CONFIG_HARDENED_USERCOPY +/* + * Rejects objects that are incorrectly sized. + * + * Returns NULL if check passes, otherwise const char * to name of cache + * to indicate an error. + */ +const char *__check_heap_object(const void *ptr, unsigned long n, + struct page *page) +{ + struct kmem_cache *s; + unsigned long offset; + size_t object_size; + + /* Find object and usable object size. */ + s = page->slab_cache; + object_size = slab_ksize(s); + + /* Reject impossible pointers. */ + if (ptr < page_address(page)) + return s->name; + + /* Find offset within object. */ + offset = (ptr - page_address(page)) % s->size; + + /* Adjust for redzone and reject if within the redzone. */ + if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { + if (offset < s->red_left_pad) + return s->name; + offset -= s->red_left_pad; + } + + /* Allow address range falling entirely within object size. */ + if (offset <= object_size && n <= object_size - offset) + return NULL; + + return s->name; +} +#endif /* CONFIG_HARDENED_USERCOPY */ + static size_t __ksize(const void *object) { struct page *page; -- cgit v0.10.2 From 8053f21675b073b379cbca258ee4a3f3850dfa94 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Tue, 26 Jul 2016 14:55:51 -0700 Subject: ASoC: dapm: Add a dummy snd_pcm_runtime to avoid NULL pointer access The SND_SOC_DAPM_PRE_PMU case would call startup()/hw_params() that might access substream->runtime through other functions. For example: Unable to handle kernel NULL pointer dereference at virtual address [....] PC is at snd_pcm_hw_rule_add+0x24/0x1b0 LR is at snd_pcm_hw_constraint_list+0x20/0x28 [....] Process arecord (pid: 424, stack limit = 0xffffffc1ecaf0020) Call trace: [] snd_pcm_hw_rule_add+0x24/0x1b0 [] snd_pcm_hw_constraint_list+0x20/0x28 [] cs53l30_pcm_startup+0x24/0x30 [] snd_soc_dai_link_event+0x290/0x354 [] dapm_seq_check_event.isra.31+0x134/0x2c8 [] dapm_seq_run_coalesced+0x94/0x1c8 [] dapm_seq_run+0xa4/0x404 [] dapm_power_widgets+0x524/0x984 [] snd_soc_dapm_stream_event+0x8c/0xa8 [] soc_pcm_prepare+0x10c/0x1ec [] snd_pcm_do_prepare+0x1c/0x38 [] snd_pcm_action_single+0x40/0x88 [] snd_pcm_action_nonatomic+0x70/0x90 [] snd_pcm_common_ioctl1+0xb6c/0xdd8 [] snd_pcm_capture_ioctl1+0x200/0x334 [] snd_pcm_ioctl_compat+0x648/0x95c [] compat_SyS_ioctl+0xac/0xfc4 [] el0_svc_naked+0x24/0x28 ---[ end trace 0dc4f99c2759c35c ]--- So this patch adds a dummy runtime for the original dummy substream to merely avoid the NULL pointer access. 
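The crash path in the trace above starts from the on-stack dummy substream that snd_soc_dai_link_event() builds: it is cleared with memset(), so substream.runtime is NULL, and any codec startup() callback that installs hw constraints will dereference it. A simplified sketch of the failure mode (not the actual driver code; some_rate_constraints stands in for the driver's constraint table):

struct snd_pcm_substream substream;

memset(&substream, 0, sizeof(substream));	/* leaves substream.runtime == NULL */

/* A codec startup() callback may then do roughly this: */
snd_pcm_hw_constraint_list(substream.runtime, 0,
			   SNDRV_PCM_HW_PARAM_RATE, &some_rate_constraints);
/* snd_pcm_hw_rule_add() then touches runtime->hw_constraints: NULL dereference */

Allocating a zeroed dummy runtime before the DAPM event runs, as the hunk below does, gives those callbacks something valid to write their constraints into.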
Signed-off-by: Nicolin Chen Signed-off-by: Mark Brown diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 8698c26..d908ff8 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3493,6 +3493,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, const struct snd_soc_pcm_stream *config = w->params + w->params_select; struct snd_pcm_substream substream; struct snd_pcm_hw_params *params = NULL; + struct snd_pcm_runtime *runtime = NULL; u64 fmt; int ret; @@ -3541,6 +3542,14 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, memset(&substream, 0, sizeof(substream)); + /* Allocate a dummy snd_pcm_runtime for startup() and other ops() */ + runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); + if (!runtime) { + ret = -ENOMEM; + goto out; + } + substream.runtime = runtime; + switch (event) { case SND_SOC_DAPM_PRE_PMU: substream.stream = SNDRV_PCM_STREAM_CAPTURE; @@ -3606,6 +3615,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, } out: + kfree(runtime); kfree(params); return ret; } -- cgit v0.10.2 From 93ca33c99f22a0a096f83f19c9f887aeb67507a1 Mon Sep 17 00:00:00 2001 From: Hiroyuki Yokoyama Date: Mon, 25 Jul 2016 01:52:43 +0000 Subject: ASoC: rsnd: Fixup SRCm_IFSVR calculate method This patch fixes the calculation accuracy degradation of SRCm_IFSVR register value. Signed-off-by: Hiroyuki Yokoyama Signed-off-by: Kuninori Morimoto Signed-off-by: Mark Brown diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index e39f916..969a516 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c @@ -226,8 +226,12 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, ifscr = 0; fsrate = 0; if (fin != fout) { + u64 n; + ifscr = 1; - fsrate = 0x0400000 / fout * fin; + n = (u64)0x0400000 * fin; + do_div(n, fout); + fsrate = n; } /* -- cgit v0.10.2 From 784bdf3bb694b256fcd6120b93e8947a84249a3a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 29 Jul 2016 16:32:30 +0200 Subject: futex: Assume all mappings are private on !MMU systems To quote Rick why there is no need for shared mapping on !MMU systems: |With MMU, shared futex keys need to identify the physical backing for |a memory address because it may be mapped at different addresses in |different processes (or even multiple times in the same process). |Without MMU this cannot happen. You only have physical addresses. So |the "private futex" behavior of using the virtual address as the key |is always correct (for both shared and private cases) on nommu |systems. This patch disables the FLAGS_SHARED in a way that allows the compiler to remove that code. [bigeasy: Added changelog ] Reported-by: Rich Felker Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Cc: Andrew Morton Link: http://lkml.kernel.org/r/20160729143230.GA21715@linutronix.de Signed-off-by: Thomas Gleixner diff --git a/kernel/futex.c b/kernel/futex.c index 33664f7..46cb3a3 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled; * Futex flags used to encode options to functions and preserve them across * restarts. */ -#define FLAGS_SHARED 0x01 +#ifdef CONFIG_MMU +# define FLAGS_SHARED 0x01 +#else +/* + * NOMMU does not have per process address space. Let the compiler optimize + * code away. 
+ */ +# define FLAGS_SHARED 0x00 +#endif #define FLAGS_CLOCKRT 0x02 #define FLAGS_HAS_TIMEOUT 0x04 @@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key) if (!key->both.ptr) return; + /* + * On MMU less systems futexes are always "private" as there is no per + * process address space. We need the smp wmb nevertheless - yes, + * arch/blackfin has MMU less SMP ... + */ + if (!IS_ENABLED(CONFIG_MMU)) { + smp_mb(); /* explicit smp_mb(); (B) */ + return; + } + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: ihold(key->shared.inode); /* implies smp_mb(); (B) */ @@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key) return; } + if (!IS_ENABLED(CONFIG_MMU)) + return; + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: iput(key->shared.inode); -- cgit v0.10.2 From 3b2c1710fac7fb278b760d1545e637cbb5ea5b5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 13 Jul 2016 16:32:03 +0300 Subject: drm/i915: Wait up to 3ms for the pcu to ack the cdclk change request on SKL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bspec tells us to keep bashing the PCU for up to 3ms when trying to inform it about an upcoming change in the cdclk frequency. Currently we only keep at it for 15*10usec (+ whatever delays gets added by the sandybridge_pcode_read() itself). Let's change the limit to 3ms. I decided to keep 10 usec delay per iteration for now, even though the spec doesn't really tell us to do that. Cc: stable@vger.kernel.org Fixes: 5d96d8afcfbb ("drm/i915/skl: Deinit/init the display at suspend/resume") Cc: David Weinehall Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1468416723-23440-1-git-send-email-ville.syrjala@linux.intel.com Tested-by: David Weinehall Reviewed-by: Chris Wilson (cherry picked from commit 848496e5902833600f7992f4faa82dc1546051ba) Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9f56ac1..e5c46a6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5691,15 +5691,7 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) { - unsigned int i; - - for (i = 0; i < 15; i++) { - if (skl_cdclk_pcu_ready(dev_priv)) - return true; - udelay(10); - } - - return false; + return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; } static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) -- cgit v0.10.2 From a7b4667a00025ac28300737c868bd4818b6d8c4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 23 May 2016 17:42:48 +0300 Subject: drm/i915: Never fully mask the the EI up rps interrupt on SNB/IVB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SNB (and IVB too I suppose) starts to misbehave if the GPU gets stuck in an infinite batch buffer loop. The GPU apparently hogs something critical and CPUs start to lose interrupts and whatnot. We can keep the system limping along by unmasking some interrupts in GEN6_PMINTRMSK. The EI up interrupt has been previously chosen for that task, so let's never mask it. 
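Concretely, the hunk below stops writing an all-ones mask to GEN6_PMINTRMSK when going idle and instead passes ~0 through gen6_sanitize_rps_pm_mask(), so the bits that must never be masked on these platforms are stripped before the value reaches the register. The general shape of such a sanitize step, as a sketch only (not the exact i915 helper):

/* Sketch: remove any always-unmasked bits from a requested interrupt mask. */
static u32 sanitize_rps_pm_mask(u32 requested_mask, u32 never_mask_bits)
{
	return requested_mask & ~never_mask_bits;	/* e.g. the EI up interrupt */
}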
v2: s/gen6_rps_pm_mask/gen6_sanitize_rps_pm_mask/ (Chris) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93122 Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1464014568-4529-1-git-send-email-ville.syrjala@linux.intel.com Cc: stable@vger.kernel.org (cherry picked from commit 12c100bfa5d9103b6c4d43636fee09c31e75605a) Signed-off-by: Daniel Vetter diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 5a8ee0c..5ca68d2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4892,7 +4892,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) else gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMINTRMSK, + gen6_sanitize_rps_pm_mask(dev_priv, ~0)); } mutex_unlock(&dev_priv->rps.hw_lock); -- cgit v0.10.2 From 0596a9048bf2aca2a74b312493f39e4d5ac3b653 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 14 Jun 2016 14:18:27 +0100 Subject: Btrfs: add missing check for writeback errors on fsync When we start an fsync we start ordered extents for all delalloc ranges. However before attempting to log the inode, we only wait for those ordered extents if we are not doing a full sync (bit BTRFS_INODE_NEEDS_FULL_SYNC is set in the inode's flags). This means that if an ordered extent completes with an IO error before we check if we can skip logging the inode, we will not catch and report the IO error to user space. This is because on an IO error, when the ordered extent completes we do not update the inode, so if the inode was not previously updated by the current transaction we end up not logging it through calls to fsync and therefore not check its mapping flags for the presence of IO errors. Fix this by checking for errors in the flags of the inode's mapping when we notice we can skip logging the inode. This caused sporadic failures in the test generic/331 (which explicitly tests for IO errors during an fsync call). Signed-off-by: Filipe Manana Reviewed-by: Liu Bo diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index bcfb4a2..6f04910 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2033,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) */ clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); + /* + * An ordered extent might have started before and completed + * already with io errors, in which case the inode was not + * updated and we end up here. So check the inode's mapping + * flags for any errors that might have happened while doing + * writeback of file data. + */ + ret = btrfs_inode_check_errors(inode); inode_unlock(inode); goto out; } -- cgit v0.10.2 From 801bec365e0e19f2ba066cd3e25a67dee21b4aae Mon Sep 17 00:00:00 2001 From: Robbie Ko Date: Tue, 23 Jun 2015 18:39:46 +0800 Subject: Btrfs: send, fix failure to move directories with the same name around When doing an incremental send we can end up not moving directories that have the same name. This happens when the same parent directory has different child directories with the same name in the parent and send snapshots. For example, consider the following scenario: Parent snapshot: . (ino 256) |---- d/ (ino 257) | |--- p1/ (ino 258) | |---- p1/ (ino 259) Send snapshot: . 
(ino 256) |--- d/ (ino 257) |--- p1/ (ino 259) |--- p1/ (ino 258) The directory named "d" (inode 257) has in both snapshots an entry with the name "p1" but it refers to different inodes in both snapshots (inode 258 in the parent snapshot and inode 259 in the send snapshot). When attempting to move inode 258, the operation is delayed because its new parent, inode 259, was not yet moved/renamed (as the stream is currently processing inode 258). Then when processing inode 259, we also end up delaying its move/rename operation so that it happens after inode 258 is moved/renamed. This decision to delay the move/rename rename operation of inode 259 is due to the fact that the new parent inode (257) still has inode 258 as its child, which has the same name has inode 259. So we end up with inode 258 move/rename operation waiting for inode's 259 move/rename operation, which in turn it waiting for inode's 258 move/rename. This results in ending the send stream without issuing move/rename operations for inodes 258 and 259 and generating the following warnings in syslog/dmesg: [148402.979747] ------------[ cut here ]------------ [148402.980588] WARNING: CPU: 14 PID: 4117 at fs/btrfs/send.c:6177 btrfs_ioctl_send+0xe03/0xe51 [btrfs] [148402.981928] Modules linked in: btrfs crc32c_generic xor raid6_pq acpi_cpufreq tpm_tis ppdev tpm parport_pc psmouse parport sg pcspkr i2c_piix4 i2c_core evdev processor serio_raw button loop autofs4 ext4 crc16 jbd2 mbcache sr_mod cdrom sd_mod ata_generic virtio_scsi ata_piix libata virtio_pci virtio_ring virtio e1000 scsi_mod floppy [last unloaded: btrfs] [148402.986999] CPU: 14 PID: 4117 Comm: btrfs Tainted: G W 4.6.0-rc7-btrfs-next-31+ #1 [148402.988136] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS by qemu-project.org 04/01/2014 [148402.988136] 0000000000000000 ffff88022139fca8 ffffffff8126b42c 0000000000000000 [148402.988136] 0000000000000000 ffff88022139fce8 ffffffff81052b14 000018212139fac8 [148402.988136] ffff88022b0db400 0000000000000000 0000000000000001 0000000000000000 [148402.988136] Call Trace: [148402.988136] [] dump_stack+0x67/0x90 [148402.988136] [] __warn+0xc2/0xdd [148402.988136] [] warn_slowpath_null+0x1d/0x1f [148402.988136] [] btrfs_ioctl_send+0xe03/0xe51 [btrfs] [148402.988136] [] btrfs_ioctl+0x14f/0x1f81 [btrfs] [148402.988136] [] ? arch_local_irq_save+0x9/0xc [148402.988136] [] ? __lock_is_held+0x3c/0x57 [148402.988136] [] vfs_ioctl+0x18/0x34 [148402.988136] [] do_vfs_ioctl+0x550/0x5be [148402.988136] [] ? __fget+0x6b/0x77 [148402.988136] [] ? __fget_light+0x62/0x71 [148402.988136] [] SyS_ioctl+0x57/0x79 [148402.988136] [] entry_SYSCALL_64_fastpath+0x18/0xa8 [148402.988136] [] ? 
trace_hardirqs_off_caller+0x3f/0xaa [148403.011373] ---[ end trace a4539270c8056f8b ]--- [148403.012296] ------------[ cut here ]------------ [148403.013071] WARNING: CPU: 14 PID: 4117 at fs/btrfs/send.c:6194 btrfs_ioctl_send+0xe19/0xe51 [btrfs] [148403.014447] Modules linked in: btrfs crc32c_generic xor raid6_pq acpi_cpufreq tpm_tis ppdev tpm parport_pc psmouse parport sg pcspkr i2c_piix4 i2c_core evdev processor serio_raw button loop autofs4 ext4 crc16 jbd2 mbcache sr_mod cdrom sd_mod ata_generic virtio_scsi ata_piix libata virtio_pci virtio_ring virtio e1000 scsi_mod floppy [last unloaded: btrfs] [148403.019708] CPU: 14 PID: 4117 Comm: btrfs Tainted: G W 4.6.0-rc7-btrfs-next-31+ #1 [148403.020104] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS by qemu-project.org 04/01/2014 [148403.020104] 0000000000000000 ffff88022139fca8 ffffffff8126b42c 0000000000000000 [148403.020104] 0000000000000000 ffff88022139fce8 ffffffff81052b14 000018322139fac8 [148403.020104] ffff88022b0db400 0000000000000000 0000000000000001 0000000000000000 [148403.020104] Call Trace: [148403.020104] [] dump_stack+0x67/0x90 [148403.020104] [] __warn+0xc2/0xdd [148403.020104] [] warn_slowpath_null+0x1d/0x1f [148403.020104] [] btrfs_ioctl_send+0xe19/0xe51 [btrfs] [148403.020104] [] btrfs_ioctl+0x14f/0x1f81 [btrfs] [148403.020104] [] ? arch_local_irq_save+0x9/0xc [148403.020104] [] ? __lock_is_held+0x3c/0x57 [148403.020104] [] vfs_ioctl+0x18/0x34 [148403.020104] [] do_vfs_ioctl+0x550/0x5be [148403.020104] [] ? __fget+0x6b/0x77 [148403.020104] [] ? __fget_light+0x62/0x71 [148403.020104] [] SyS_ioctl+0x57/0x79 [148403.020104] [] entry_SYSCALL_64_fastpath+0x18/0xa8 [148403.020104] [] ? trace_hardirqs_off_caller+0x3f/0xaa [148403.038981] ---[ end trace a4539270c8056f8c ]--- There's another issue caused by similar (but more complex) changes in the directory hierarchy that makes move/rename operations fail, described with the following example: Parent snapshot: . |---- a/ (ino 262) | |---- c/ (ino 268) | |---- d/ (ino 263) |---- ance/ (ino 267) |---- e/ (ino 264) |---- f/ (ino 265) |---- ance/ (ino 266) Send snapshot: . |---- a/ (ino 262) |---- c/ (ino 268) | |---- ance/ (ino 267) | |---- d/ (ino 263) | |---- ance/ (ino 266) | |---- f/ (ino 265) |---- e/ (ino 264) When the inode 265 is processed, the path for inode 267 is computed, which at that time corresponds to "d/ance", and it's stored in the names cache. Later on when processing inode 266, we end up orphanizing (renaming to a name matching the pattern o--) inode 267 because it has the same name as inode 266 and it's currently a child of the new parent directory (inode 263) for inode 266. After the orphanization and while we are still processing inode 266, a rename operation for inode 266 is generated. However the source path for that rename operation is incorrect because it ends up using the old, pre-orphanization, name of inode 267. The no longer valid name for inode 267 was previously cached when processing inode 265 and it remains usable and considered valid until the inode currently being processed has a number greater than 267. This resulted in the receiving side failing with the following error: ERROR: rename d/ance/ance -> d/ance failed: No such file or directory So fix these issues by detecting such circular dependencies for rename operations and by clearing the cached name of an inode once the inode is orphanized. A test case for fstests will follow soon. 
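The core of the fix is the new path_loop() helper in the hunk below: before a delayed move is applied, it walks the chain of new parents of the inode being moved, and if that walk arrives back at the inode itself the move would make the directory its own ancestor, so it is re-queued behind the offending ancestor instead. A toy, self-contained illustration of the idea (not btrfs code):

#include <stdbool.h>
#include <stddef.h>

struct toy_dir {
	unsigned long ino;
	struct toy_dir *new_parent;	/* parent directory in the send snapshot */
};

/* Would moving 'victim' under its new parent chain create a cycle? */
bool move_creates_cycle(const struct toy_dir *victim)
{
	const struct toy_dir *p;

	for (p = victim->new_parent; p; p = p->new_parent)
		if (p->ino == victim->ino)
			return true;	/* victim would become its own ancestor */
	return false;
}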
Signed-off-by: Robbie Ko Signed-off-by: Filipe Manana [Rewrote change log to be more detailed and organized, and improved comments] Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index b71dd29..993e1ba 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -231,7 +231,6 @@ struct pending_dir_move { u64 parent_ino; u64 ino; u64 gen; - bool is_orphan; struct list_head update_refs; }; @@ -1861,7 +1860,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, * was already unlinked/moved, so we can safely assume that we will not * overwrite anything at this point in time. */ - if (other_inode > sctx->send_progress) { + if (other_inode > sctx->send_progress || + is_waiting_for_move(sctx, other_inode)) { ret = get_inode_info(sctx->parent_root, other_inode, NULL, who_gen, NULL, NULL, NULL, NULL); if (ret < 0) @@ -3047,7 +3047,6 @@ static int add_pending_dir_move(struct send_ctx *sctx, pm->parent_ino = parent_ino; pm->ino = ino; pm->gen = ino_gen; - pm->is_orphan = is_orphan; INIT_LIST_HEAD(&pm->list); INIT_LIST_HEAD(&pm->update_refs); RB_CLEAR_NODE(&pm->node); @@ -3113,6 +3112,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, return NULL; } +static int path_loop(struct send_ctx *sctx, struct fs_path *name, + u64 ino, u64 gen, u64 *ancestor_ino) +{ + int ret = 0; + u64 parent_inode = 0; + u64 parent_gen = 0; + u64 start_ino = ino; + + *ancestor_ino = 0; + while (ino != BTRFS_FIRST_FREE_OBJECTID) { + fs_path_reset(name); + + if (is_waiting_for_rm(sctx, ino)) + break; + if (is_waiting_for_move(sctx, ino)) { + if (*ancestor_ino == 0) + *ancestor_ino = ino; + ret = get_first_ref(sctx->parent_root, ino, + &parent_inode, &parent_gen, name); + } else { + ret = __get_cur_name_and_parent(sctx, ino, gen, + &parent_inode, + &parent_gen, name); + if (ret > 0) { + ret = 0; + break; + } + } + if (ret < 0) + break; + if (parent_inode == start_ino) { + ret = 1; + if (*ancestor_ino == 0) + *ancestor_ino = ino; + break; + } + ino = parent_inode; + gen = parent_gen; + } + return ret; +} + static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) { struct fs_path *from_path = NULL; @@ -3123,6 +3164,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) u64 parent_ino, parent_gen; struct waiting_dir_move *dm = NULL; u64 rmdir_ino = 0; + u64 ancestor; + bool is_orphan; int ret; name = fs_path_alloc(); @@ -3135,9 +3178,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) dm = get_waiting_dir_move(sctx, pm->ino); ASSERT(dm); rmdir_ino = dm->rmdir_ino; + is_orphan = dm->orphanized; free_waiting_dir_move(sctx, dm); - if (pm->is_orphan) { + if (is_orphan) { ret = gen_unique_name(sctx, pm->ino, pm->gen, from_path); } else { @@ -3155,6 +3199,22 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) goto out; sctx->send_progress = sctx->cur_ino + 1; + ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); + if (ret) { + LIST_HEAD(deleted_refs); + ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); + ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, + &pm->update_refs, &deleted_refs, + is_orphan); + if (ret < 0) + goto out; + if (rmdir_ino) { + dm = get_waiting_dir_move(sctx, pm->ino); + ASSERT(dm); + dm->rmdir_ino = rmdir_ino; + } + goto out; + } fs_path_reset(name); to_path = name; name = NULL; @@ -3325,6 +3385,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx, u64 left_gen; u64 right_gen; int ret = 0; + struct 
waiting_dir_move *wdm; if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) return 0; @@ -3383,7 +3444,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx, goto out; } - if (is_waiting_for_move(sctx, di_key.objectid)) { + wdm = get_waiting_dir_move(sctx, di_key.objectid); + if (wdm && !wdm->orphanized) { ret = add_pending_dir_move(sctx, sctx->cur_ino, sctx->cur_inode_gen, @@ -3643,11 +3705,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); goto out; if (ret) { struct name_cache_entry *nce; + struct waiting_dir_move *wdm; ret = orphanize_inode(sctx, ow_inode, ow_gen, cur->full_path); if (ret < 0) goto out; + + /* + * If ow_inode has its rename operation delayed + * make sure that its orphanized name is used in + * the source path when performing its rename + * operation. + */ + if (is_waiting_for_move(sctx, ow_inode)) { + wdm = get_waiting_dir_move(sctx, + ow_inode); + ASSERT(wdm); + wdm->orphanized = true; + } + /* * Make sure we clear our orphanized inode's * name from the name cache. This is because the @@ -3663,6 +3740,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); name_cache_delete(sctx, nce); kfree(nce); } + + /* + * ow_inode might currently be an ancestor of + * cur_ino, therefore compute valid_path (the + * current path of cur_ino) again because it + * might contain the pre-orphanization name of + * ow_inode, which is no longer valid. + */ + fs_path_reset(valid_path); + ret = get_cur_path(sctx, sctx->cur_ino, + sctx->cur_inode_gen, valid_path); + if (ret < 0) + goto out; } else { ret = send_unlink(sctx, cur->full_path); if (ret < 0) -- cgit v0.10.2 From 7969e77a73164b418da851cbae35cdd6c1b43fee Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 17 Jun 2016 17:13:36 +0100 Subject: Btrfs: send, add missing error check for calls to path_loop() The function path_loop() can return a negative integer, signaling an error, 0 if there's no path loop and 1 if there's a path loop. We were treating any non zero values as meaning that a path loop exists. Fix this by explicitly checking for errors and gracefully return them to user space. Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 993e1ba..0dc05bb 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -3200,6 +3200,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) sctx->send_progress = sctx->cur_ino + 1; ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); + if (ret < 0) + goto out; if (ret) { LIST_HEAD(deleted_refs); ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); -- cgit v0.10.2 From 4122ea64f877f632390bca1886c8a6f666f28398 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 27 Jun 2016 16:54:44 +0100 Subject: Btrfs: incremental send, fix invalid paths for rename operations Example scenario: Parent snapshot: . (ino 277) |---- tmp/ (ino 278) |---- pre/ (ino 280) | |---- wait_dir/ (ino 281) | |---- desc/ (ino 282) |---- ance/ (ino 283) | |---- below_ance/ (ino 279) | |---- other_dir/ (ino 284) Send snapshot: . 
(ino 277) |---- tmp/ (ino 278) |---- other_dir/ (ino 284) |---- below_ance/ (ino 279) | |---- pre/ (ino 280) | |---- wait_dir/ (ino 281) |---- desc/ (ino 282) |---- ance/ (ino 283) While computing the send stream the following steps happen: 1) While processing inode 279 we end up delaying its rename operation because its new parent in the send snapshot, inode 284, was not yet processed and therefore not yet renamed; 2) Later when processing inode 280 we end up renaming it immediately to "ance/below_ance/pre" and not delaying its rename operation because its new parent (inode 279 in the send snapshot) has its rename operation delayed and inode 280 is not an ancestor of inode 279 (its parent in the send snapshot) in the parent snapshot; 3) When processing inode 281 we end up delaying its rename operation because its new parent in the send snapshot, inode 284, was not yet processed and therefore not yet renamed; 4) When processing inode 282 we do not delay its rename operation because its parent in the send snapshot, inode 281, already has its own rename operation delayed and our current inode (282) is not an ancestor of inode 281 in the parent snapshot. Therefore inode 282 is renamed to "ance/below_ance/pre/wait_dir"; 5) When processing inode 283 we realize that we can rename it because one of its ancestors in the send snapshot, inode 281, has its rename operation delayed and inode 283 is not an ancestor of inode 281 in the parent snapshot. So a rename operation to rename inode 283 to "ance/below_ance/pre/wait_dir/desc/ance" is issued. This path is invalid because it contains a loop that went undetected by the incremental send implementation, as inode 283 ends up getting included twice in the path (once with its path in the parent snapshot). Therefore its rename operation must wait until the ancestor inode 284 is renamed. Fix this by not terminating the rename dependency checks when we find an ancestor, in the send snapshot, that has its rename operation delayed, so that we continue doing the same checks if the current inode is not an ancestor, in the parent snapshot, of that ancestor in the send snapshot we are processing in the loop. The problem and reproducer were reported by Robbie Ko, as part of a patch titled "Btrfs: incremental send, avoid ancestor rename to descendant". However the fix was unnecessarily complicated and can be addressed with much less code and effort. Reported-by: Robbie Ko Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 0dc05bb..3d6a4dd 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -3534,7 +3534,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, ret = is_ancestor(sctx->parent_root, sctx->cur_ino, sctx->cur_inode_gen, ino, path_before); - break; + if (ret) + break; } fs_path_reset(path_before); -- cgit v0.10.2 From 99ea42ddb13bad6188d4f183f8866cacd14fa141 Mon Sep 17 00:00:00 2001 From: Robbie Ko Date: Tue, 23 Jun 2015 18:39:49 +0800 Subject: Btrfs: incremental send, fix premature rmdir operations Under certain situations, an incremental send operation can contain an rmdir operation that will make the receiving end fail when attempting to execute it, because the target directory is not yet empty. Consider the following example: Parent snapshot: . (ino 256) |--- a/ (ino 257) | |--- c/ (ino 260) | |--- del/ (ino 259) |--- tmp/ (ino 258) |--- x/ (ino 261) Send snapshot: .
(ino 256) |--- a/ (ino 257) | |--- x/ (ino 261) | |--- c/ (ino 260) |--- tmp/ (ino 258) 1) When processing inode 258, we delay its rename operation because inode 260 is its new parent in the send snapshot and it was not yet renamed (since 260 > 258, that is, beyond the current progress); 2) When processing inode 259, we realize we can not yet send an rmdir operation (against inode 259) because inode 258 was still not yet renamed/moved away from inode 259. Therefore we update data structures so that after inode 258 is renamed, we try again to see if we can finally send an rmdir operation for inode 259; 3) When we process inode 260, we send a rename operation for it followed by a rename operation for inode 258. Once we send the rename operation for inode 258 we then check if we can finally issue an rmdir for its previous parent, inode 259, by calling the can_rmdir() function with a value of sctx->cur_ino + 1 (260 + 1 = 261) for its "progress" argument. This makes can_rmdir() return true (value 1) because even though there's still a child inode of inode 259 that was not yet renamed/moved, which is inode 261, the given value of progress (261) is not lower than 261 (that is, not lower than the inode number of some child of inode 259). So we end up sending an rmdir operation for inode 259 before its child inode 261 is processed and renamed. So fix this by passing the correct progress value to the call to can_rmdir() from within apply_dir_move() (where we issue delayed rename operations), which should match sctx->cur_ino (the number of the inode currently being processed) and not sctx->cur_ino + 1. A test case for fstests follows soon. Signed-off-by: Robbie Ko Signed-off-by: Filipe Manana [Rewrote change log to be more detailed, clear and well formatted] Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 3d6a4dd..4115aba 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -3236,7 +3236,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) /* already deleted */ goto finish; } - ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); + ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino); if (ret < 0) goto out; if (!ret) -- cgit v0.10.2 From 443f9d266c4848a3a9ea386f66c4bed2f5997940 Mon Sep 17 00:00:00 2001 From: Robbie Ko Date: Mon, 22 Jun 2015 17:08:45 +0800 Subject: Btrfs: send, fix warning due to late freeing of orphan_dir_info structures Under certain situations, when doing an incremental send, we can end up not freeing orphan_dir_info structures as soon as they are no longer needed.
Instead we end up freeing them only after finishing the send stream, which causes a warning to be emitted: [282735.229200] ------------[ cut here ]------------ [282735.229968] WARNING: CPU: 9 PID: 10588 at fs/btrfs/send.c:6298 btrfs_ioctl_send+0xe2f/0xe51 [btrfs] [282735.231282] Modules linked in: btrfs crc32c_generic xor raid6_pq acpi_cpufreq tpm_tis ppdev tpm parport_pc psmouse parport sg pcspkr i2c_piix4 i2c_core evdev processor serio_raw button loop autofs4 ext4 crc16 jbd2 mbcache sr_mod cdrom sd_mod ata_generic virtio_scsi ata_piix libata virtio_pci virtio_ring virtio e1000 scsi_mod floppy [last unloaded: btrfs] [282735.237130] CPU: 9 PID: 10588 Comm: btrfs Tainted: G W 4.6.0-rc7-btrfs-next-31+ #1 [282735.239309] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS by qemu-project.org 04/01/2014 [282735.240160] 0000000000000000 ffff880224273ca8 ffffffff8126b42c 0000000000000000 [282735.240160] 0000000000000000 ffff880224273ce8 ffffffff81052b14 0000189a24273ac8 [282735.240160] ffff8802210c9800 0000000000000000 0000000000000001 0000000000000000 [282735.240160] Call Trace: [282735.240160] [] dump_stack+0x67/0x90 [282735.240160] [] __warn+0xc2/0xdd [282735.240160] [] warn_slowpath_null+0x1d/0x1f [282735.240160] [] btrfs_ioctl_send+0xe2f/0xe51 [btrfs] [282735.240160] [] btrfs_ioctl+0x14f/0x1f81 [btrfs] [282735.240160] [] ? arch_local_irq_save+0x9/0xc [282735.240160] [] vfs_ioctl+0x18/0x34 [282735.240160] [] do_vfs_ioctl+0x550/0x5be [282735.240160] [] ? __fget+0x6b/0x77 [282735.240160] [] ? __fget_light+0x62/0x71 [282735.240160] [] SyS_ioctl+0x57/0x79 [282735.240160] [] entry_SYSCALL_64_fastpath+0x18/0xa8 [282735.240160] [] ? time_hardirqs_off+0x9/0x14 [282735.240160] [] ? trace_hardirqs_off_caller+0x1f/0xaa [282735.256343] ---[ end trace a4539270c8056f93 ]--- Consider the following example: Parent snapshot: . (ino 256) |--- a/ (ino 257) | |--- c/ (ino 260) | |--- del/ (ino 259) |--- tmp/ (ino 258) |--- x/ (ino 261) |--- y/ (ino 262) Send snapshot: . (ino 256) |--- a/ (ino 257) | |--- x/ (ino 261) | |--- y/ (ino 262) | |--- c/ (ino 260) |--- tmp/ (ino 258) 1) When processing inode 258, we end up delaying its rename operation because it has an ancestor (in the send snapshot) that has a higher inode number (inode 260) which was also renamed in the send snapshot, therefore we delay the rename of inode 258 so that it happens after inode 260 is renamed; 2) When processing inode 259, we end up delaying its deletion (rmdir operation) because it has a child inode (258) that has its rename operation delayed. At this point we allocate an orphan_dir_info structure and tag inode 258 so that we later attempt to see if we can delete (rmdir) inode 259 once inode 258 is renamed; 3) When we process inode 260, after renaming it we finally do the rename operation for inode 258. Once we issue the rename operation for inode 258 we notice that this inode was tagged so that we attempt to see if at this point we can delete (rmdir) inode 259. But at this point we can not still delete inode 259 because it has 2 children, inodes 261 and 262, that were not yet processed and therefore not yet moved (renamed) away from inode 259. We end up not freeing the orphan_dir_info structure allocated in step 2; 4) We process inodes 261 and 262, and once we move/rename inode 262 we issue the rmdir operation for inode 260; 5) We finish the send stream and notice that red black tree that contains orphan_dir_info structures is not empty, so we emit a warning and then free any orphan_dir_structures left. 
So fix this by freeing an orphan_dir_info structure once we try to apply a pending rename operation if we cannot yet delete the tagged directory. A test case for fstests follows soon. Signed-off-by: Robbie Ko Signed-off-by: Filipe Manana [Modified changelog to be more detailed and easier to understand] diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 4115aba..e552c33 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -2947,6 +2947,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, } if (loc.objectid > send_progress) { + struct orphan_dir_info *odi; + + odi = get_orphan_dir_info(sctx, dir); + free_orphan_dir_info(sctx, odi); ret = 0; goto out; } -- cgit v0.10.2 From 764433a12e78e1a2161793c02eceafdfed2d652b Mon Sep 17 00:00:00 2001 From: Robbie Ko Date: Tue, 23 Jun 2015 18:39:50 +0800 Subject: Btrfs: send, fix invalid leaf accesses due to incorrect utimes operations During an incremental send, if we have delayed rename operations for inodes that were children of directories which were removed in the send snapshot, we can end up accessing incorrect items in a leaf or accessing beyond the last item of the leaf due to issuing utimes operations for the removed inodes. Consider the following example: Parent snapshot: . (ino 256) |--- a/ (ino 257) | |--- c/ (ino 262) | |--- b/ (ino 258) | |--- d/ (ino 263) | |--- del/ (ino 261) |--- x/ (ino 259) |--- y/ (ino 260) Send snapshot: . (ino 256) |--- a/ (ino 257) | |--- b/ (ino 258) | |--- c/ (ino 262) | |--- y/ (ino 260) | |--- d/ (ino 263) |--- x/ (ino 259) 1) When processing inodes 259 and 260, we end up delaying their rename operations because their parents, inodes 263 and 262 respectively, were not yet processed and therefore not yet renamed; 2) When processing inode 262, its rename operation is issued, and right after that the rename operation for inode 260 is issued. However right after issuing the rename operation for inode 260, at send.c:apply_dir_move(), we issue utimes operations for all current and past parents of inode 260. This means we try to send a utimes operation for its old parent, inode 261 (deleted in the send snapshot), which does not cause any immediate and deterministic failure, because when the target inode is not found in the send snapshot, the send.c:send_utimes() function ignores it and uses the leaf region pointed to by path->slots[0], which can be any unrelated item (belonging to another inode) or it can be a region outside the leaf boundaries, if the leaf is full and path->slots[0] matches the number of items in the leaf. So we end up either successfully sending a utimes operation, which is fine and irrelevant because the old parent (inode 261) will end up being deleted later, or we end up doing an invalid memory access that crashes the kernel. So fix this by making apply_dir_move() issue utimes operations only for parents that still exist in the send snapshot. In a separate patch we will make send_utimes() return an error (-ENOENT) if the given inode does not exist in the send snapshot. Signed-off-by: Robbie Ko Signed-off-by: Filipe Manana [Rewrote change log to be more detailed and better organized] Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index e552c33..8b65396 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -3270,8 +3270,18 @@ finish: * and old parent(s).
*/ list_for_each_entry(cur, &pm->update_refs, list) { - if (cur->dir == rmdir_ino) + /* + * The parent inode might have been deleted in the send snapshot + */ + ret = get_inode_info(sctx->send_root, cur->dir, NULL, + NULL, NULL, NULL, NULL, NULL); + if (ret == -ENOENT) { + ret = 0; continue; + } + if (ret < 0) + goto out; + ret = send_utimes(sctx, cur->dir, cur->dir_gen); if (ret < 0) goto out; -- cgit v0.10.2 From 15b253eace1f98dabbc6e03f6793fcf8603b1655 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 2 Jul 2016 05:43:46 +0100 Subject: Btrfs: send, avoid incorrect leaf accesses when sending utimes operations The caller of send_utimes() is supposed to be sure that the inode number it passes to this function does actually exist in the send snapshot. However due to logic/algorithm bugs (such as the one fixed by the patch titled "Btrfs: send, fix invalid leaf accesses due to incorrect utimes operations"), this might not be the case and when that happens it makes send_utimes() use an unrelated leaf item as the target inode item or access beyond a leaf's boundaries (when the leaf is full and path->slots[0] matches the number of items in the leaf). So if the call to btrfs_search_slot() done by send_utimes() does not find the inode item, just make sure send_utimes() returns -ENOENT and does not silently access unrelated leaf items or do invalid leaf accesses, also allowing us to easily and deterministically catch such algorithmic/logic bugs. Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 8b65396..2db8dc8 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -2502,6 +2502,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino); key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); + if (ret > 0) + ret = -ENOENT; if (ret < 0) goto out; -- cgit v0.10.2 From 951555856b88aa47bc238de6b4c6e97bfd9d36df Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 1 Aug 2016 01:50:37 +0100 Subject: Btrfs: send, don't bug on inconsistent snapshots When doing an incremental send, if we find a new/modified/deleted extent, reference or xattr without having previously processed the corresponding inode item we end up executing a BUG_ON(). This is because whenever an extent, xattr or reference is added, modified or deleted, we always expect to have the corresponding inode item updated. However there are situations where this will not happen due to transient -ENOMEM or -ENOSPC errors when doing delayed inode updates. For example, when punching holes we can succeed in deleting and modifying (shrinking) extents but later fail to do the delayed inode update. So after such failure we close our transaction handle and right after a snapshot of the fs/subvol tree can be made and used later for a send operation. The same thing can happen during truncate, link, unlink, and xattr related operations. So instead of executing a BUG_ON, make send return an -EIO error and print an informative error message to dmesg/syslog.
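As a rough illustration of that approach, here is a standalone userspace toy (not the btrfs code; the function name and arguments are made up for the example) showing the pattern of reporting the inconsistency and returning -EIO instead of halting the kernel:

  #include <errno.h>
  #include <stdio.h>

  /* Toy stand-in for the changed_*() checks: complain and fail gracefully. */
  static int changed_item(unsigned long long cur_ino,
                          unsigned long long key_objectid, const char *what)
  {
          if (cur_ino != key_objectid) {
                  fprintf(stderr,
                          "inconsistent snapshot: found %s for inode %llu while processing inode %llu\n",
                          what, key_objectid, cur_ino);
                  return -EIO; /* surfaced to user space instead of a BUG_ON() */
          }
          return 0;
  }

  int main(void)
  {
          int ret = changed_item(257, 260, "extent");

          printf("send would stop with %d instead of crashing\n", ret);
          return 0;
  }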
Signed-off-by: Filipe Manana diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 2db8dc8..efe129f 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -273,6 +273,39 @@ struct name_cache_entry { char name[]; }; +static void inconsistent_snapshot_error(struct send_ctx *sctx, + enum btrfs_compare_tree_result result, + const char *what) +{ + const char *result_string; + + switch (result) { + case BTRFS_COMPARE_TREE_NEW: + result_string = "new"; + break; + case BTRFS_COMPARE_TREE_DELETED: + result_string = "deleted"; + break; + case BTRFS_COMPARE_TREE_CHANGED: + result_string = "updated"; + break; + case BTRFS_COMPARE_TREE_SAME: + ASSERT(0); + result_string = "unchanged"; + break; + default: + ASSERT(0); + result_string = "unexpected"; + } + + btrfs_err(sctx->send_root->fs_info, + "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu", + result_string, what, sctx->cmp_key->objectid, + sctx->send_root->root_key.objectid, + (sctx->parent_root ? + sctx->parent_root->root_key.objectid : 0)); +} + static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); static struct waiting_dir_move * @@ -5711,7 +5744,10 @@ static int changed_ref(struct send_ctx *sctx, { int ret = 0; - BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); + if (sctx->cur_ino != sctx->cmp_key->objectid) { + inconsistent_snapshot_error(sctx, result, "reference"); + return -EIO; + } if (!sctx->cur_inode_new_gen && sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { @@ -5736,7 +5772,10 @@ static int changed_xattr(struct send_ctx *sctx, { int ret = 0; - BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); + if (sctx->cur_ino != sctx->cmp_key->objectid) { + inconsistent_snapshot_error(sctx, result, "xattr"); + return -EIO; + } if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result == BTRFS_COMPARE_TREE_NEW) @@ -5760,7 +5799,10 @@ static int changed_extent(struct send_ctx *sctx, { int ret = 0; - BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); + if (sctx->cur_ino != sctx->cmp_key->objectid) { + inconsistent_snapshot_error(sctx, result, "extent"); + return -EIO; + } if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result != BTRFS_COMPARE_TREE_DELETED) -- cgit v0.10.2 From 67710892ec983aa79ad1e2a2642fe8e3a4a194ea Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 6 Jun 2016 11:51:25 +0100 Subject: Btrfs: be more precise on errors when getting an inode from disk When we attempt to read an inode from disk, we end up always returning an -ESTALE error to the caller regardless of the actual failure reason, which can be an out of memory problem (when allocating a path), some error found when reading from the fs/subvolume btree (like a genuine IO error) or the inode does not exists. So lets start returning the real error code to the callers so that they don't treat all -ESTALE errors as meaning that the inode does not exists (such as during orphan cleanup). This will also be needed for a subsequent patch in the same series dealing with a special fsync case. 
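The following standalone sketch (a toy model built around a made-up read_inode() helper, not btrfs_iget() itself) shows why propagating the real errno matters: only -ENOENT means the inode is truly gone and the orphan item can be dropped, while -EIO or -ENOMEM must be treated as genuine failures:

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  /* Toy lookup: report why the inode could not be read instead of one catch-all code. */
  static int read_inode(int ino)
  {
          if (ino == 42)
                  return -ENOENT; /* genuinely missing */
          if (ino < 0)
                  return -EIO;    /* simulated read error */
          return 0;               /* found */
  }

  int main(void)
  {
          int candidates[] = { 1, 42, -5 };

          for (unsigned int i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
                  int ret = read_inode(candidates[i]);

                  if (ret == -ENOENT)
                          printf("inode %d: gone, safe to drop the orphan item\n", candidates[i]);
                  else if (ret < 0)
                          printf("inode %d: real failure: %s\n", candidates[i], strerror(-ret));
                  else
                          printf("inode %d: ok\n", candidates[i]);
          }
          return 0;
  }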
Signed-off-by: Filipe Manana diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3e61bd1..f968654 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3428,10 +3428,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) found_key.offset = 0; inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); ret = PTR_ERR_OR_ZERO(inode); - if (ret && ret != -ESTALE) + if (ret && ret != -ENOENT) goto out; - if (ret == -ESTALE && root == root->fs_info->tree_root) { + if (ret == -ENOENT && root == root->fs_info->tree_root) { struct btrfs_root *dead_root; struct btrfs_fs_info *fs_info = root->fs_info; int is_dead_root = 0; @@ -3467,7 +3467,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) * Inode is already gone but the orphan item is still there, * kill the orphan item. */ - if (ret == -ESTALE) { + if (ret == -ENOENT) { trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { ret = PTR_ERR(trans); @@ -3626,7 +3626,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf, /* * read an inode from the btree into the in-memory inode */ -static void btrfs_read_locked_inode(struct inode *inode) +static int btrfs_read_locked_inode(struct inode *inode) { struct btrfs_path *path; struct extent_buffer *leaf; @@ -3645,14 +3645,19 @@ static void btrfs_read_locked_inode(struct inode *inode) filled = true; path = btrfs_alloc_path(); - if (!path) + if (!path) { + ret = -ENOMEM; goto make_bad; + } memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); ret = btrfs_lookup_inode(NULL, root, path, &location, 0); - if (ret) + if (ret) { + if (ret > 0) + ret = -ENOENT; goto make_bad; + } leaf = path->nodes[0]; @@ -3805,11 +3810,12 @@ cache_acl: } btrfs_update_iflags(inode); - return; + return 0; make_bad: btrfs_free_path(path); make_bad_inode(inode); + return ret; } /* @@ -5595,7 +5601,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { - btrfs_read_locked_inode(inode); + int ret; + + ret = btrfs_read_locked_inode(inode); if (!is_bad_inode(inode)) { inode_tree_add(inode); unlock_new_inode(inode); @@ -5604,7 +5612,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, } else { unlock_new_inode(inode); iput(inode); - inode = ERR_PTR(-ESTALE); + ASSERT(ret < 0); + inode = ERR_PTR(ret < 0 ? ret : -ESTALE); } } -- cgit v0.10.2 From 44f714dae50a2e795d3268a6831762aa6fa54f55 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 6 Jun 2016 16:11:13 +0100 Subject: Btrfs: improve performance on fsync against new inode after rename/unlink With commit 56f23fdbb600 ("Btrfs: fix file/data loss caused by fsync after rename and new inode") we got simple fix for a functional issue when the following sequence of actions is done: at transaction N create file A at directory D at transaction N + M (where M >= 1) move/rename existing file A from directory D to directory E create a new file named A at directory D fsync the new file power fail The solution was to simply detect such scenario and fallback to a full transaction commit when we detect it. However this turned out to had a significant impact on throughput (and a bit on latency too) for benchmarks using the dbench tool, which simulates real workloads from smbd (Samba) servers. 
For example on a test vm (with a debug kernel): Unpatched: Throughput 19.1572 MB/sec 32 clients 32 procs max_latency=1005.229 ms Patched: Throughput 23.7015 MB/sec 32 clients 32 procs max_latency=809.206 ms The patched results (this patch is applied) are similar to the results of a kernel with the commit 56f23fdbb600 ("Btrfs: fix file/data loss caused by fsync after rename and new inode") reverted. This change avoids the fallback to a transaction commit and instead makes sure all the names of the conflicting inode (the one that had a name in a past transaction that matches the name of the new file in the same parent directory) are logged so that at log replay time we don't lose neither the new file nor the old file, and the old file gets the name it was renamed to. This also ends up avoiding a full transaction commit for a similar case that involves an unlink instead of a rename of the old file: at transaction N create file A at directory D at transaction N + M (where M >= 1) remove file A create a new file named A at directory D fsync the new file power fail Signed-off-by: Filipe Manana diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f968654..453b9d0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4203,6 +4203,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) int err = 0; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_trans_handle *trans; + u64 last_unlink_trans; if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; @@ -4225,11 +4226,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) if (err) goto out; + last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; + /* now the directory is empty */ err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), dentry->d_name.name, dentry->d_name.len); - if (!err) + if (!err) { btrfs_i_size_write(inode, 0); + /* + * Propagate the last_unlink_trans value of the deleted dir to + * its parent directory. 
This is to prevent an unrecoverable + * log tree in the case we do something like this: + * 1) create dir foo + * 2) create snapshot under dir foo + * 3) delete the snapshot + * 4) rmdir foo + * 5) mkdir foo + * 6) fsync foo or some file inside foo + */ + if (last_unlink_trans >= trans->transid) + BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; + } out: btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c05f69a..5fa624c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4469,7 +4469,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, static int btrfs_check_ref_name_override(struct extent_buffer *eb, const int slot, const struct btrfs_key *key, - struct inode *inode) + struct inode *inode, + u64 *other_ino) { int ret; struct btrfs_path *search_path; @@ -4528,7 +4529,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb, search_path, parent, name, this_name_len, 0); if (di && !IS_ERR(di)) { - ret = 1; + struct btrfs_key di_key; + + btrfs_dir_item_key_to_cpu(search_path->nodes[0], + di, &di_key); + if (di_key.type == BTRFS_INODE_ITEM_KEY) { + ret = 1; + *other_ino = di_key.objectid; + } else { + ret = -EAGAIN; + } goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -4718,16 +4728,71 @@ again: if ((min_key.type == BTRFS_INODE_REF_KEY || min_key.type == BTRFS_INODE_EXTREF_KEY) && BTRFS_I(inode)->generation == trans->transid) { + u64 other_ino = 0; + ret = btrfs_check_ref_name_override(path->nodes[0], path->slots[0], - &min_key, inode); + &min_key, inode, + &other_ino); if (ret < 0) { err = ret; goto out_unlock; } else if (ret > 0) { - err = 1; - btrfs_set_log_full_commit(root->fs_info, trans); - goto out_unlock; + struct btrfs_key inode_key; + struct inode *other_inode; + + if (ins_nr > 0) { + ins_nr++; + } else { + ins_nr = 1; + ins_start_slot = path->slots[0]; + } + ret = copy_items(trans, inode, dst_path, path, + &last_extent, ins_start_slot, + ins_nr, inode_only, + logged_isize); + if (ret < 0) { + err = ret; + goto out_unlock; + } + ins_nr = 0; + btrfs_release_path(path); + inode_key.objectid = other_ino; + inode_key.type = BTRFS_INODE_ITEM_KEY; + inode_key.offset = 0; + other_inode = btrfs_iget(root->fs_info->sb, + &inode_key, root, + NULL); + /* + * If the other inode that had a conflicting dir + * entry was deleted in the current transaction, + * we don't need to do more work nor fallback to + * a transaction commit. + */ + if (IS_ERR(other_inode) && + PTR_ERR(other_inode) == -ENOENT) { + goto next_key; + } else if (IS_ERR(other_inode)) { + err = PTR_ERR(other_inode); + goto out_unlock; + } + /* + * We are safe logging the other inode without + * acquiring its i_mutex as long as we log with + * the LOG_INODE_EXISTS mode. We're safe against + * concurrent renames of the other inode as well + * because during a rename we pin the log and + * update the log with the new name before we + * unpin it. 
+ */ + err = btrfs_log_inode(trans, root, other_inode, + LOG_INODE_EXISTS, + 0, LLONG_MAX, ctx); + iput(other_inode); + if (err) + goto out_unlock; + else + goto next_key; } } @@ -4795,7 +4860,7 @@ next_slot: ins_nr = 0; } btrfs_release_path(path); - +next_key: if (min_key.offset < (u64)-1) { min_key.offset++; } else if (min_key.type < max_key.type) { @@ -4989,8 +5054,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; - if (IS_ROOT(parent)) + if (IS_ROOT(parent)) { + inode = d_inode(parent); + if (btrfs_must_commit_transaction(trans, inode)) + ret = 1; break; + } parent = dget_parent(parent); dput(old_parent); -- cgit v0.10.2 From f005bd7e3b84a353475a2895e2c7686a66297d87 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Aug 2016 10:54:15 +0100 Subject: clocksource/arm_arch_timer: Force per-CPU interrupt to be level-triggered The ARM architected timer produces level-triggered interrupts (this is mandated by the architecture). Unfortunately, a number of device-trees get this wrong, and expose an edge-triggered interrupt. Until now, this wasn't too much an issue, as the programming of the trigger would fail (the corresponding PPI cannot be reconfigured), and the kernel would be happy with this. But we're about to change this, and trust DT a lot if the driver doesn't provide its own trigger information. In that context, the timer breaks badly. While we do need to fix the DTs, there is also some userspace out there (kvmtool) that generates the same kind of broken DT on the fly, and that will completely break with newer kernels. As a safety measure, and to keep buggy software alive as well as buying us some time to fix DTs all over the place, let's check what trigger configuration has been given us by the firmware. If this is not a level configuration, then we know that the DT/ACPI configuration is bust, and we pick some defaults which won't be worse than the existing setup. Signed-off-by: Marc Zyngier Cc: Andrew Lunn Cc: Liu Gang Cc: Mark Rutland Cc: Masahiro Yamada Cc: Wenbin Song Cc: Mingkai Hu Cc: Florian Fainelli Cc: Kevin Hilman Cc: Daniel Lezcano Cc: Michal Simek Cc: Jon Hunter Cc: arm@kernel.org Cc: bcm-kernel-feedback-list@broadcom.com Cc: linux-arm-kernel@lists.infradead.org Cc: Sebastian Hesselbarth Cc: Jason Cooper Cc: Ray Jui Cc: "Hou Zhiqiang" Cc: Tirumalesh Chalamarla Cc: linux-samsung-soc@vger.kernel.org Cc: Yuan Yao Cc: Jan Glauber Cc: Gregory Clement Cc: linux-amlogic@lists.infradead.org Cc: soren.brinkmann@xilinx.com Cc: Rajesh Bhagat Cc: Scott Branden Cc: Duc Dang Cc: Kukjin Kim Cc: Carlo Caione Cc: Dinh Nguyen Link: http://lkml.kernel.org/r/1470045256-9032-2-git-send-email-marc.zyngier@arm.com Signed-off-by: Thomas Gleixner diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 28bce3f..5770054 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -8,6 +8,9 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ + +#define pr_fmt(fmt) "arm_arch_timer: " fmt + #include #include #include @@ -370,16 +373,33 @@ static bool arch_timer_has_nonsecure_ppi(void) arch_timer_ppi[PHYS_NONSECURE_PPI]); } +static u32 check_ppi_trigger(int irq) +{ + u32 flags = irq_get_trigger_type(irq); + + if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) { + pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq); + pr_warn("WARNING: Please fix your firmware\n"); + flags = IRQF_TRIGGER_LOW; + } + + return flags; +} + static int arch_timer_starting_cpu(unsigned int cpu) { struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); + u32 flags; __arch_timer_setup(ARCH_CP15_TIMER, clk); - enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); + flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); + enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); - if (arch_timer_has_nonsecure_ppi()) - enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); + if (arch_timer_has_nonsecure_ppi()) { + flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]); + enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags); + } arch_counter_set_user_access(); if (evtstrm_enable) -- cgit v0.10.2 From 7f555c8e5a84b348c2b76f4ca78eae7222354c03 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 29 Jul 2016 18:03:42 -0400 Subject: drm/amdgpu/gmc7: add missing mullins case Looks like this got missed when we ported the code from radeon. Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index d24a82b..0b0f086 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: return 0; default: BUG(); } -- cgit v0.10.2 From 903f75c8958e21f0fba2520467c11eaca824226d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 29 Jul 2016 18:14:39 -0400 Subject: drm/amdgpu/ci: add mullins to default case for smc ucode It's already covered by the default case, but add it for consistency. 
Reviewed-by: Alexandre Demers Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index e2f0e5d..a5c94b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: default: BUG(); } -- cgit v0.10.2 From 4f7ec157cb6dfb1e2cd18abf21e6cb1107842c5f Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Thu, 28 Jul 2016 17:25:01 -0400 Subject: drm/amd/amdgpu: change pptable output format from ASCII to binary Reviewed-by: Edward O'Callaghan Reviewed-by: Alex Deucher Signed-off-by: Eric Huang Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ff63b88..5cc7052 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; char *table = NULL; - int size, i; + int size; if (adev->pp_enabled) size = amdgpu_dpm_get_pp_table(adev, &table); @@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (size >= PAGE_SIZE) size = PAGE_SIZE - 1; - for (i = 0; i < size; i++) { - sprintf(buf + i, "%02x", table[i]); - } - sprintf(buf + i, "\n"); + memcpy(buf, table, size); return size; } -- cgit v0.10.2 From 2392f7fd695246df4fb5f0b5fb88ce37cdb01764 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 26 Jul 2016 18:06:39 +0530 Subject: ASoC: Intel: Skylake: Check list empty while getting module info Module list can be NULL so check if the list is empty before accessing the list. Signed-off-by: Senthilnathan Veppur Signed-off-by: Vinod Koul Signed-off-by: Mark Brown diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c index 25fcb79..ddcb52a 100644 --- a/sound/soc/intel/skylake/skl-sst-utils.c +++ b/sound/soc/intel/skylake/skl-sst-utils.c @@ -123,6 +123,11 @@ int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid, uuid_mod = (uuid_le *)uuid; + if (list_empty(&ctx->uuid_list)) { + dev_err(ctx->dev, "Module list is empty\n"); + return -EINVAL; + } + list_for_each_entry(module, &ctx->uuid_list, list) { if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) { dfw_config->module_id = module->id; -- cgit v0.10.2 From 1f4c17a03ba7f430d63dba8c8e08ff1e2712581d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 1 Aug 2016 13:36:08 -0400 Subject: SUNRPC: Handle EADDRNOTAVAIL on connection failures If the connect attempt immediately fails with an EADDRNOTAVAIL error, then that means our choice of source port number was bad. This error is expected when we set the SO_REUSEPORT socket option and we have 2 sockets sharing the same source and destination address and port combinations. Signed-off-by: Trond Myklebust Fixes: 402e23b4ed9ed ("SUNRPC: Fix stupid typo in xs_sock_set_reuseport") Cc: stable@vger.kernel.org # v4.0+ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 111767a..c6b1d48 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2295,6 +2295,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) /* SYN_SENT! 
*/ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; + break; + case -EADDRNOTAVAIL: + /* Source port number is unavailable. Try a new one! */ + transport->srcport = 0; } out: return ret; -- cgit v0.10.2 From 300108740b659c2380a731f147dd85ca0365db4f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 19 Jul 2016 13:04:14 -0700 Subject: lkdtm: fix false positive warning from -Wmaybe-uninitialized The variable in use here doesn't matter (it's just used to exercise taking up stack space), but this changes its use to pass its address instead, to avoid a compiler warning: drivers/misc/lkdtm_usercopy.c:54:15: warning: 'bad_stack' may be used uninitialized in this function [-Wmaybe-uninitialized] Reported-by: Arnd Bergmann Signed-off-by: Kees Cook diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index 5a3fd76..5525a20 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c @@ -49,7 +49,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame) /* This is a pointer to outside our current stack frame. */ if (bad_frame) { - bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack); + bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack); } else { /* Put start address just inside stack. */ bad_stack = task_stack_page(current) + THREAD_SIZE; -- cgit v0.10.2 From e50bd2354ced2018eff1c7e06e7db4db1f5ce745 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 1 Aug 2016 14:18:34 -0700 Subject: lkdtm: Fix targets for objcopy usage The targets for lkdtm's objcopy were missing which caused them to always be rebuilt. This corrects the problem. Reported-by: Linus Torvalds Signed-off-by: Kees Cook diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 4387ccb..7410c6d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -69,5 +69,6 @@ OBJCOPYFLAGS := OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \ --set-section-flags .text=alloc,readonly \ --rename-section .text=.rodata -$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o +targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o +$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE $(call if_changed,objcopy) -- cgit v0.10.2 From 680682d4d537565e2c358483e1feeca30a8cf3d4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 17 Jul 2016 19:55:27 +0100 Subject: cfg80211: fix missing break in NL8211_CHAN_WIDTH_80P80 case The switch on chandef->width is missing a break on the NL8211_CHAN_WIDTH_80P80 case; currently we get a WARN_ON when center_freq2 is non-zero because of the missing break. Signed-off-by: Colin Ian King Signed-off-by: Johannes Berg diff --git a/net/wireless/chan.c b/net/wireless/chan.c index da49c0b..bb3d64e 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -513,6 +513,7 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy, r = cfg80211_get_chans_dfs_available(wiphy, chandef->center_freq2, width); + break; default: WARN_ON(chandef->center_freq2); break; -- cgit v0.10.2 From 4e3f21bc7bfbdc4a348852e4da1540227e1b0171 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 11 Jul 2016 15:09:15 +0200 Subject: mac80211: fix check for buffered powersave frames with txq The logic was inverted here, set the bit if frames are pending. 
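A minimal userspace illustration of the corrected logic (a toy, not the mac80211 code): the per-TID bit is set only when that queue actually has backlog, and cleared otherwise:

  #include <stdio.h>

  #define NUM_TIDS 16

  int main(void)
  {
          unsigned int backlog[NUM_TIDS] = { 0 };
          unsigned long buffered_tids = 0;

          backlog[3] = 5; /* pretend TID 3 has five buffered frames */

          for (int tid = 0; tid < NUM_TIDS; tid++) {
                  if (backlog[tid]) /* the fix: non-zero backlog sets the bit */
                          buffered_tids |= 1UL << tid;
                  else
                          buffered_tids &= ~(1UL << tid);
          }
          printf("buffered_tids = 0x%lx\n", buffered_tids); /* prints 0x8 */
          return 0;
  }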
Fixes: ba8c3d6f16a1 ("mac80211: add an intermediate software queue implementation") Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 2e8a902..9dce3b1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta) for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); - if (!txqi->tin.backlog_packets) + if (txqi->tin.backlog_packets) set_bit(tid, &sta->txq_buffered_tids); else clear_bit(tid, &sta->txq_buffered_tids); -- cgit v0.10.2 From ad3331acb1464024d20a184d3358f3cecc373b54 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 2 Aug 2016 13:47:43 -0400 Subject: SUNRPC: Fix up socket autodisconnect Ensure that we don't forget to set up the disconnection timer for the case when a connect request is fulfilled after the RPC request that initiated it has timed out or been interrupted. Signed-off-by: Trond Myklebust diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 8313960..ea244b2 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -680,6 +680,20 @@ out: spin_unlock_bh(&xprt->transport_lock); } +static bool +xprt_has_timer(const struct rpc_xprt *xprt) +{ + return xprt->idle_timeout != 0; +} + +static void +xprt_schedule_autodisconnect(struct rpc_xprt *xprt) + __must_hold(&xprt->transport_lock) +{ + if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) + mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); +} + static void xprt_init_autodisconnect(unsigned long data) { @@ -688,6 +702,8 @@ xprt_init_autodisconnect(unsigned long data) spin_lock(&xprt->transport_lock); if (!list_empty(&xprt->recv)) goto out_abort; + /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ + xprt->last_used = jiffies; if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) goto out_abort; spin_unlock(&xprt->transport_lock); @@ -725,6 +741,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) goto out; xprt->snd_task =NULL; xprt->ops->release_xprt(xprt, NULL); + xprt_schedule_autodisconnect(xprt); out: spin_unlock_bh(&xprt->transport_lock); wake_up_bit(&xprt->state, XPRT_LOCKED); @@ -888,11 +905,6 @@ static void xprt_timer(struct rpc_task *task) spin_unlock_bh(&xprt->transport_lock); } -static inline int xprt_has_timer(struct rpc_xprt *xprt) -{ - return xprt->idle_timeout != 0; -} - /** * xprt_prepare_transmit - reserve the transport before sending a request * @task: RPC task about to send a request @@ -1280,9 +1292,7 @@ void xprt_release(struct rpc_task *task) if (!list_empty(&req->rq_list)) list_del(&req->rq_list); xprt->last_used = jiffies; - if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) - mod_timer(&xprt->timer, - xprt->last_used + xprt->idle_timeout); + xprt_schedule_autodisconnect(xprt); spin_unlock_bh(&xprt->transport_lock); if (req->rq_buffer) xprt->ops->buf_free(req->rq_buffer); -- cgit v0.10.2 From 1fabaddd153e9e2f7e5695266abb56c934629718 Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Mon, 11 Jul 2016 09:34:18 +0200 Subject: arm64: dts: Fix RTC by providing rtc_src clock Add RTC source clock as Exynos7 needs source (32.768KHz) clock for RTC block. Without this currently S3C RTC driver probe is broken on this SoC. 
Signed-off-by: Alim Akhtar Reviewed-by: Javier Martinez Canillas Signed-off-by: Krzysztof Kozlowski Signed-off-by: Arnd Bergmann diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index 299f3ce..c528dd5 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -12,6 +12,7 @@ /dts-v1/; #include "exynos7.dtsi" #include +#include / { model = "Samsung Exynos7 Espresso board based on EXYNOS7"; @@ -43,6 +44,8 @@ &rtc { status = "okay"; + clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>; + clock-names = "rtc", "rtc_src"; }; &watchdog { -- cgit v0.10.2 From d97ffe788184313fd83ff7da4b6587156f8388f0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Aug 2016 09:44:33 +0200 Subject: ARM: do away with final ARCH_REQUIRE_GPIOLIB A new user of the Kconfig selection of ARCH_REQUIRE_GPIOLIB has appeared. Replace with just selecting GPIOLIB. Signed-off-by: Linus Walleij Signed-off-by: Olof Johansson diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig index dc7c6ed..61284b9 100644 --- a/arch/arm/mach-clps711x/Kconfig +++ b/arch/arm/mach-clps711x/Kconfig @@ -1,13 +1,13 @@ menuconfig ARCH_CLPS711X bool "Cirrus Logic EP721x/EP731x-based" depends on ARCH_MULTI_V4T - select ARCH_REQUIRE_GPIOLIB select AUTO_ZRELADDR select CLKSRC_OF select CLPS711X_TIMER select COMMON_CLK select CPU_ARM720T select GENERIC_CLOCKEVENTS + select GPIOLIB select MFD_SYSCON select OF_IRQ select USE_OF -- cgit v0.10.2 From 4d2bf027e8ca499a52224e508f52a59d3858094a Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 26 Jul 2016 10:00:48 +0200 Subject: MAINTAINER: alpine: add a mailing list Add the linux-arm-kernel mailing list for the Alpine SoCs. Signed-off-by: Antoine Tenart Signed-off-by: Olof Johansson diff --git a/MAINTAINERS b/MAINTAINERS index 25f4320..b5c04df 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -999,6 +999,7 @@ N: meson ARM/Annapurna Labs ALPINE ARCHITECTURE M: Tsahee Zidenberg M: Antoine Tenart +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-alpine/ F: arch/arm/boot/dts/alpine* -- cgit v0.10.2 From 869ec056fa8450184423c8076e0a342db127795c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 Jun 2016 14:25:19 +0200 Subject: ARM: shmobile: don't call platform_can_secondary_boot on UP For rcar-gen2, we build the SMP files even for UP configurations, and that just broke: arch/arm/mach-shmobile/built-in.o: In function `shmobile_smp_init_fallback_ops': pm-rcar-gen2.c:(.init.text+0x40c): undefined reference to `platform_can_secondary_boot' This adds an compile-time check before the call to platform_can_secondary_boot, turning the function into an empty stub for UP configurations. Signed-off-by: Arnd Bergmann Fixes: c21af444eace ("ARM: shmobile: smp: Add function to prioritize DT SMP") Acked-by: Geert Uytterhoeven Signed-off-by: Olof Johansson diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index f3dba6f..02e21bc 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c @@ -40,5 +40,8 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu) bool __init shmobile_smp_init_fallback_ops(void) { /* fallback on PSCI/smp_ops if no other DT based method is detected */ + if (!IS_ENABLED(CONFIG_SMP)) + return false; + return platform_can_secondary_boot() ? 
true : false; } -- cgit v0.10.2 From fe85f07f582751da0a65d4ba7e3b36ebcbe55019 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 2 Aug 2016 11:21:35 +0800 Subject: drm/amdgpu: update golden setting of iceland Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bff8668..a498323 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -409,6 +409,7 @@ static const u32 golden_settings_iceland_a11[] = mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002, mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, mmTCC_CTRL, 0x00100000, 0xf31fff7f, -- cgit v0.10.2 From 3a494b588f724fde00ae3ca0aa1c7e6cef1d0d51 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 2 Aug 2016 11:28:47 +0800 Subject: drm/amdgpu: update golden setting of carrizo Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index a498323..85854ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -506,8 +506,10 @@ static const u32 cz_golden_settings_a11[] = mmGB_GPU_ID, 0x0000000f, 0x00000000, mmPA_SC_ENHANCE, 0xffffffff, 0x00000001, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, mmTA_CNTL_AUX, 0x000f000f, 0x00010000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302 -- cgit v0.10.2 From 9761bc53c8e1c52f3f0aa4334f38fd87b0cf1613 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 2 Aug 2016 11:26:34 +0800 Subject: drm/amdgpu: update golden setting of polaris11 Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Reviewed-by: Ken Wang Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 85854ba..3b7f6e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] = static const u32 golden_settings_polaris11_a11[] = { - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, + mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, @@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] = mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, - mmSQ_CONFIG, 0x07f80000, 0x07180000, + mmSQ_CONFIG, 0x07f80000, 0x01180000, mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3, -- cgit v0.10.2 From 6d51c813b172b4374fe7a6b732b6666f8d77bfea Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 2 Aug 2016 11:30:05 +0800 Subject: drm/amdgpu: update golden setting of stoney Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 717359d..2aee2c6 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; +static const u32 golden_settings_stoney_common[] = +{ + mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004, + mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000 +}; static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) { @@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, stoney_mgcg_cgcg_init, (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_stoney_common, + (const u32)ARRAY_SIZE(golden_settings_stoney_common)); break; default: break; -- cgit v0.10.2 From a5a5e3084ec457f234298392dfa2c55db47b603f Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 2 Aug 2016 13:15:12 +0800 Subject: drm/amdgpu: update golden setting of polaris10 Signed-off-by: Huang Rui Reviewed-by: Ken Wang Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 3b7f6e8..b818461 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -302,8 +302,8 @@ static const u32 polaris11_golden_common_all[] = static const u32 golden_settings_polaris10_a11[] = { mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, - mmCB_HW_CONTROL_2, 0, 0x0f000000, + mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, -- cgit v0.10.2 From c37a54ac37e7fbf8abe2b7b886ac5f1b3e8f8d29 Mon Sep 17 00:00:00 2001 From: Maital Hahn Date: Wed, 13 Jul 2016 14:44:41 +0300 Subject: mac80211: mesh: flush stations before beacons are stopped Some drivers (e.g. wl18xx) expect that the last stage in the de-initialization process will be stopping the beacons, similar to AP flow. Update ieee80211_stop_mesh() flow accordingly. As peers can be removed dynamically, this would not impact other drivers. Tested also on Ralink RT3572 chipset. 
Signed-off-by: Maital Hahn Signed-off-by: Yaniv Machani Signed-off-by: Johannes Berg diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c66411d..42120d9 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) netif_carrier_off(sdata->dev); + /* flush STAs and mpaths on this iface */ + sta_info_flush(sdata); + mesh_path_flush_by_iface(sdata); + /* stop the beacon */ ifmsh->mesh_id_len = 0; sdata->vif.bss_conf.enable_beacon = false; clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); + + /* remove beacon */ bcn = rcu_dereference_protected(ifmsh->beacon, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(ifmsh->beacon, NULL); kfree_rcu(bcn, rcu_head); - /* flush STAs and mpaths on this iface */ - sta_info_flush(sdata); - mesh_path_flush_by_iface(sdata); - /* free all potentially still buffered group-addressed frames */ local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); skb_queue_purge(&ifmsh->ps.bc_buf); -- cgit v0.10.2 From 7a376ac11fc2109dfd86442ff79982ecf16dcd6d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Aug 2016 11:07:44 +0200 Subject: Revert "ARM: aspeed: adapt defconfigs for new CONFIG_PRINTK_TIME" The patch that this was preparing for made it into neither v4.7 nor v4.8, so we should back this out as well to avoid the opposite warning: arch/arm/configs/aspeed_g5_defconfig:62:warning: symbol value '1' invalid for PRINTK_TIME arch/arm/configs/aspeed_g4_defconfig:61:warning: symbol value '1' invalid for PRINTK_TIME Sorry for not catching this earlier. Signed-off-by: Arnd Bergmann Fixes: 0ef659a30055 ("ARM: aspeed: adapt defconfigs for new CONFIG_PRINTK_TIME") Cc: stable@vger.kernel.org # v4.7 diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig index b6e54ee..ca39c04 100644 --- a/arch/arm/configs/aspeed_g4_defconfig +++ b/arch/arm/configs/aspeed_g4_defconfig @@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_FIRMWARE_MEMMAP=y CONFIG_FANOTIFY=y -CONFIG_PRINTK_TIME=1 +CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_STRIP_ASM_SYMS=y CONFIG_PAGE_POISONING=y diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig index 8926051..4f366b0 100644 --- a/arch/arm/configs/aspeed_g5_defconfig +++ b/arch/arm/configs/aspeed_g5_defconfig @@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_FIRMWARE_MEMMAP=y CONFIG_FANOTIFY=y -CONFIG_PRINTK_TIME=1 +CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_STRIP_ASM_SYMS=y CONFIG_PAGE_POISONING=y -- cgit v0.10.2 From e6571499336e10f93a77c51a35fd1a96828eea71 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 3 Aug 2016 11:01:32 +0100 Subject: Btrfs: remove unused function btrfs_add_delayed_qgroup_reserve() No longer used as of commit 5846a3c26873 ("btrfs: qgroup: Fix a race in delayed_ref which leads to abort trans"). 
Signed-off-by: Filipe Manana diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 430b368..02dd2ec 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -861,33 +861,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, return 0; } -int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info, - struct btrfs_trans_handle *trans, - u64 ref_root, u64 bytenr, u64 num_bytes) -{ - struct btrfs_delayed_ref_root *delayed_refs; - struct btrfs_delayed_ref_head *ref_head; - int ret = 0; - - if (!fs_info->quota_enabled || !is_fstree(ref_root)) - return 0; - - delayed_refs = &trans->transaction->delayed_refs; - - spin_lock(&delayed_refs->lock); - ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0); - if (!ref_head) { - ret = -ENOENT; - goto out; - } - WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root); - ref_head->qgroup_ref_root = ref_root; - ref_head->qgroup_reserved = num_bytes; -out: - spin_unlock(&delayed_refs->lock); - return ret; -} - int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 5fca953..43f3629 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, u64 parent, u64 ref_root, u64 owner, u64 offset, u64 reserved, int action, struct btrfs_delayed_extent_op *extent_op); -int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info, - struct btrfs_trans_handle *trans, - u64 ref_root, u64 bytenr, u64 num_bytes); int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, -- cgit v0.10.2 From 0b857b44b5e445dc850cd91c45ce6edeb7797480 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Sun, 31 Jul 2016 00:27:39 -0700 Subject: nvme-rdma: Don't leak uninitialized memory in connect request private data Zero out the full nvme_rdma_cm_req structure before sending it. Otherwise we end up leaking kernel memory in the reserved field, which might break forward compatibility in the future. Signed-off-by: Roland Dreier Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 3e3ce2b..b96b883 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1269,7 +1269,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) { struct nvme_rdma_ctrl *ctrl = queue->ctrl; struct rdma_conn_param param = { }; - struct nvme_rdma_cm_req priv; + struct nvme_rdma_cm_req priv = { }; int ret; param.qp_num = queue->qp->qp_num; -- cgit v0.10.2 From 5f372eb3e76317b4fe4ba53ad1547f39fc883350 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 31 Jul 2016 18:43:15 +0300 Subject: nvme-rdma: Queue ns scanning after a sucessful reconnection On an ordered target shutdown, the target can send a AEN on a namespace removal, this will trigger the host to queue ns-list query. The shutdown will trigger error recovery which will attepmt periodic reconnect. We can hit a race where the ns rescanning fails (error recovery kicked in and we're not connected) causing removing all the namespaces and when we reconnect we won't see any namespaces for this controller. So, queue a namespace rescan after we successfully reconnected to the target. 
Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index b96b883..7fd1d73 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -748,8 +748,10 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); - if (ctrl->queue_count > 1) + if (ctrl->queue_count > 1) { nvme_start_queues(&ctrl->ctrl); + nvme_queue_scan(&ctrl->ctrl); + } dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); -- cgit v0.10.2 From 57de5a0a40db97bb390d3ac1f4c2e74b9f3515c3 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 14 Jul 2016 17:39:47 +0300 Subject: nvme-rdma: Fix device removal handling Device removal sequence may have crashed because the controller (and admin queue space) was freed before we destroyed the admin queue resources. Thus we want to destroy the admin queue and only then queue controller deletion and wait for it to complete. More specifically we: 1. own the controller deletion (make sure we are not competing with another deletion). 2. get rid of inflight reconnects if exists (which also destroy and create queues). 3. destroy the queue. 4. safely queue controller deletion (and wait for it to complete). Reported-by: Steve Wise Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 7fd1d73..3ffec37 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -169,7 +169,6 @@ MODULE_PARM_DESC(register_always, static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); -static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl); /* XXX: really should move to a generic header sooner or later.. */ static inline void put_unaligned_le24(u32 val, u8 *p) @@ -1320,37 +1319,39 @@ out_destroy_queue_ib: * that caught the event. Since we hold the callout until the controller * deletion is completed, we'll deadlock if the controller deletion will * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership - * of destroying this queue before-hand, destroy the queue resources - * after the controller deletion completed with the exception of destroying - * the cm_id implicitely by returning a non-zero rc to the callout. + * of destroying this queue before-hand, destroy the queue resources, + * then queue the controller deletion which won't destroy this queue and + * we destroy the cm_id implicitely by returning a non-zero rc to the callout. 
*/ static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue) { struct nvme_rdma_ctrl *ctrl = queue->ctrl; - int ret, ctrl_deleted = 0; + int ret; - /* First disable the queue so ctrl delete won't free it */ - if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) - goto out; + /* Own the controller deletion */ + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) + return 0; - /* delete the controller */ - ret = __nvme_rdma_del_ctrl(ctrl); - if (!ret) { - dev_warn(ctrl->ctrl.device, - "Got rdma device removal event, deleting ctrl\n"); - flush_work(&ctrl->delete_work); + dev_warn(ctrl->ctrl.device, + "Got rdma device removal event, deleting ctrl\n"); - /* Return non-zero so the cm_id will destroy implicitly */ - ctrl_deleted = 1; + /* Get rid of reconnect work if its running */ + cancel_delayed_work_sync(&ctrl->reconnect_work); + /* Disable the queue so ctrl delete won't free it */ + if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) { /* Free this queue ourselves */ - rdma_disconnect(queue->cm_id); - ib_drain_qp(queue->qp); + nvme_rdma_stop_queue(queue); nvme_rdma_destroy_queue_ib(queue); + + /* Return non-zero so the cm_id will destroy implicitly */ + ret = 1; } -out: - return ctrl_deleted; + /* Queue controller deletion */ + queue_work(nvme_rdma_wq, &ctrl->delete_work); + flush_work(&ctrl->delete_work); + return ret; } static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, -- cgit v0.10.2 From 2461a8dd38bea3cb5b1c1f0323794483292fb03f Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 24 Jul 2016 09:29:51 +0300 Subject: nvme-rdma: Remove duplicate call to nvme_remove_namespaces nvme_uninit_ctrl already does that for us. Note that we reordered nvme_rdma_shutdown_ctrl and nvme_uninit_ctrl, this is perfectly fine because we actually want ctrl uninit (aen, scan cancellation and namespaces removal) to happen before we shutdown the rdma resources. Also, centralize the deletion work and the dead controller removal work code duplication into __nvme_rdma_shutdown_ctrl that accepts a shutdown boolean. 
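The shape of the consolidation described above, as a minimal sketch with stand-in types and helpers (the actual helper added by the diff below is __nvme_rdma_remove_ctrl()):

#include <stdbool.h>

struct ctrl;

void ctrl_uninit(struct ctrl *c);	/* stop AEN/scan work, remove namespaces */
void ctrl_shutdown_io(struct ctrl *c);	/* tear down queues / transport state */
void ctrl_put(struct ctrl *c);		/* drop the final reference */

/* One removal path shared by "delete" and "remove dead controller". */
void remove_ctrl(struct ctrl *c, bool shutdown)
{
	/* uninit first, so no new scans or AENs race with the teardown */
	ctrl_uninit(c);
	if (shutdown)
		ctrl_shutdown_io(c);
	ctrl_put(c);
}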
Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 3ffec37..1279bc2 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1660,15 +1660,20 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) nvme_rdma_destroy_admin_queue(ctrl); } +static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) +{ + nvme_uninit_ctrl(&ctrl->ctrl); + if (shutdown) + nvme_rdma_shutdown_ctrl(ctrl); + nvme_put_ctrl(&ctrl->ctrl); +} + static void nvme_rdma_del_ctrl_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, delete_work); - nvme_remove_namespaces(&ctrl->ctrl); - nvme_rdma_shutdown_ctrl(ctrl); - nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); + __nvme_rdma_remove_ctrl(ctrl, true); } static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) @@ -1701,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work) struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, delete_work); - nvme_remove_namespaces(&ctrl->ctrl); - nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); + __nvme_rdma_remove_ctrl(ctrl, false); } static void nvme_rdma_reset_ctrl_work(struct work_struct *work) -- cgit v0.10.2 From a34ca17a9717fe607cd58285a1704cb6526cf561 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 24 Jul 2016 09:22:19 +0300 Subject: nvme-rdma: Free the I/O tags when we delete the controller If we wait until we free the controller (free_ctrl) we might lose our rdma device without any notification while we still have open resources (tags mrs and dma mappings). Instead, destroy the tags with their rdma resources once we delete the device and not when freeing it. Note that we don't do that in nvme_rdma_shutdown_ctrl because controller reset uses it as well and we want to give active I/O a chance to complete successfully. Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 1279bc2..6378dc9 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -686,11 +686,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) list_del(&ctrl->list); mutex_unlock(&nvme_rdma_ctrl_mutex); - if (ctrl->ctrl.tagset) { - blk_cleanup_queue(ctrl->ctrl.connect_q); - blk_mq_free_tag_set(&ctrl->tag_set); - nvme_rdma_dev_put(ctrl->device); - } kfree(ctrl->queues); nvmf_free_options(nctrl->opts); free_ctrl: @@ -1665,6 +1660,13 @@ static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) nvme_uninit_ctrl(&ctrl->ctrl); if (shutdown) nvme_rdma_shutdown_ctrl(ctrl); + + if (ctrl->ctrl.tagset) { + blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_free_tag_set(&ctrl->tag_set); + nvme_rdma_dev_put(ctrl->device); + } + nvme_put_ctrl(&ctrl->ctrl); } -- cgit v0.10.2 From a159c64d936eb0d1da29d8ad384183d8984899c9 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 24 Jul 2016 09:32:08 +0300 Subject: nvme-loop: Remove duplicate call to nvme_remove_namespaces nvme_uninit_ctrl already does that for us. Note that we reordered nvme_loop_shutdown_ctrl with nvme_uninit_ctrl but its safe because we want controller uninit to happen before we shutdown the transport resources. 
Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 94e7829..7affd40 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work) struct nvme_loop_ctrl *ctrl = container_of(work, struct nvme_loop_ctrl, delete_work); - nvme_remove_namespaces(&ctrl->ctrl); - nvme_loop_shutdown_ctrl(ctrl); nvme_uninit_ctrl(&ctrl->ctrl); + nvme_loop_shutdown_ctrl(ctrl); nvme_put_ctrl(&ctrl->ctrl); } @@ -501,7 +500,6 @@ out_free_queues: nvme_loop_destroy_admin_queue(ctrl); out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); - nvme_remove_namespaces(&ctrl->ctrl); nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); } -- cgit v0.10.2 From 45862ebcc4883b1b6bc0701cd15cb2b68b140c5d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 24 Jul 2016 09:26:16 +0300 Subject: nvme-rdma: Make sure to shutdown the controller if we can Relying on ctrl state in nvme_rdma_shutdown_ctrl is wrong because it will never be NVME_CTRL_LIVE (delete_ctrl or reset_ctrl invoked it). Instead, check that the admin queue is connected. Note that it is safe because we can never see a copmeting thread trying to destroy the admin queue (reset or delete controller). Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 6378dc9..f4b83686 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1646,7 +1646,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) nvme_rdma_free_io_queues(ctrl); } - if (ctrl->ctrl.state == NVME_CTRL_LIVE) + if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags)) nvme_shutdown_ctrl(&ctrl->ctrl); blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); -- cgit v0.10.2 From 1f85e118c81d15aa9e002604dfb69c823d4aac16 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 3 Aug 2016 01:24:05 +0000 Subject: ASoC: simple-card-utils: add missing MODULE_xxx() simple-card-utils might be used as module, but MODULE_xxx() information was missed. This patch adds it. Otherwise, we will have below error, and can't use it. Specil thanks to Kevin. > insmod simple-card-utils.ko simple_card_utils: module license 'unspecified' taints kernel. 
Disabling lock debugging due to kernel taint simple_card_utils: Unknown symbol snd_soc_of_parse_daifmt (err 0) simple_card_utils: Unknown symbol snd_soc_of_parse_card_name (err 0) insmod: can't insert 'simple-card-utils.ko': \ unknown symbol in module, or unknown parameter Reported-by: Kevin Hilman Signed-off-by: Kuninori Morimoto Signed-off-by: Mark Brown diff --git a/sound/soc/generic/Makefile b/sound/soc/generic/Makefile index 45602ca..2d53c8d 100644 --- a/sound/soc/generic/Makefile +++ b/sound/soc/generic/Makefile @@ -1,5 +1,5 @@ -obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) := simple-card-utils.o - +snd-soc-simple-card-utils-objs := simple-card-utils.o snd-soc-simple-card-objs := simple-card.o -obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o +obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += snd-soc-simple-card-utils.o +obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index d89a9a1..9599de6 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -7,6 +7,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include #include #include @@ -95,3 +96,8 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card, return 0; } EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name); + +/* Module information */ +MODULE_AUTHOR("Kuninori Morimoto "); +MODULE_DESCRIPTION("ALSA SoC Simple Card Utils"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From bce91f8a4247905b8c40a53f72c14db908cd0710 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Mon, 1 Aug 2016 19:36:07 -0700 Subject: openvswitch: Remove incorrect WARN_ONCE(). ovs_ct_find_existing() issues a warning if an existing conntrack entry classified as IP_CT_NEW is found, with the premise that this should not happen. However, a newly confirmed, non-expected conntrack entry remains IP_CT_NEW as long as no reply direction traffic is seen. This has resulted into somewhat confusing kernel log messages. This patch removes this check and warning. Fixes: 289f2253 ("openvswitch: Find existing conntrack entry after upcall.") Suggested-by: Joe Stringer Signed-off-by: Jarno Rajahalme Acked-by: Joe Stringer Signed-off-by: David S. Miller diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index c644c78..e054a74 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; - enum ip_conntrack_info ctinfo; struct nf_conn *ct; unsigned int dataoff; u8 protonum; @@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, ct = nf_ct_tuplehash_to_ctrack(h); - ctinfo = ovs_ct_get_info(h); - if (ctinfo == IP_CT_NEW) { - /* This should not happen. 
*/ - WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct); - } skb->nfct = &ct->ct_general; - skb->nfctinfo = ctinfo; + skb->nfctinfo = ovs_ct_get_info(h); return ct; } -- cgit v0.10.2 From ea966cb6b9e11e87f7b146549aef8e13cad5c6ff Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 2 Aug 2016 12:23:29 +0200 Subject: net: xgene: fix maybe-uninitialized variable Building with -Wmaybe-uninitialized shows a potential use of an uninitialized variable: drivers/net/ethernet/apm/xgene/xgene_enet_hw.c: In function 'xgene_enet_phy_connect': drivers/net/ethernet/apm/xgene/xgene_enet_hw.c:802:23: warning: 'phy_dev' may be used uninitialized in this function [-Wmaybe-uninitialized] Although the compiler correctly identified this based on the function, the current code is still safe as long dev->of_node is non-NULL for the case of CONFIG_ACPI=n, which is currently the case. The warning is now disabled by default, but still appears when building with W=1, and other build test tools should be able to detect it as well. Adding an #else clause here makes the code more robust and makes it clear to the compiler that this cannot happen. Signed-off-by: Arnd Bergmann Fixes: 8089a96f601b ("drivers: net: xgene: Add backward compatibility") Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 37a0f46..18bb955 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev) netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; } +#else + return -ENODEV; #endif } -- cgit v0.10.2 From 1f415a74b0ca64b5bfacbb12d71ed2ec050a8cfb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 2 Aug 2016 16:12:14 +0100 Subject: bpf: fix method of PTR_TO_PACKET reg id generation Using per-register incrementing ID can lead to find_good_pkt_pointers() confusing registers which have completely different values. Consider example: 0: (bf) r6 = r1 1: (61) r8 = *(u32 *)(r6 +76) 2: (61) r0 = *(u32 *)(r6 +80) 3: (bf) r7 = r8 4: (07) r8 += 32 5: (2d) if r8 > r0 goto pc+9 R0=pkt_end R1=ctx R6=ctx R7=pkt(id=0,off=0,r=32) R8=pkt(id=0,off=32,r=32) R10=fp 6: (bf) r8 = r7 7: (bf) r9 = r7 8: (71) r1 = *(u8 *)(r7 +0) 9: (0f) r8 += r1 10: (71) r1 = *(u8 *)(r7 +1) 11: (0f) r9 += r1 12: (07) r8 += 32 13: (2d) if r8 > r0 goto pc+1 R0=pkt_end R1=inv56 R6=ctx R7=pkt(id=0,off=0,r=32) R8=pkt(id=1,off=32,r=32) R9=pkt(id=1,off=0,r=32) R10=fp 14: (71) r1 = *(u8 *)(r9 +16) 15: (b7) r7 = 0 16: (bf) r0 = r7 17: (95) exit We need to get a UNKNOWN_VALUE with imm to force id generation so lines 0-5 make r7 a valid packet pointer. We then read two different bytes from the packet and add them to copies of the constructed packet pointer. r8 (line 9) and r9 (line 11) will get the same id of 1, independently. When either of them is validated (line 13) - find_good_pkt_pointers() will also mark the other as safe. This leads to access on line 14 being mistakenly considered safe. Fixes: 969bf05eb3ce ("bpf: direct packet access") Signed-off-by: Jakub Kicinski Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f72f23b..7094c69 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -194,6 +194,7 @@ struct verifier_env { struct verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ + u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; }; @@ -1301,7 +1302,7 @@ add_imm: /* dst_reg stays as pkt_ptr type and since some positive * integer value was added to the pointer, increment its 'id' */ - dst_reg->id++; + dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range and off to zero */ dst_reg->off = 0; -- cgit v0.10.2 From 087d7a8c917491e6e7feb707a858d624bf5b5f14 Mon Sep 17 00:00:00 2001 From: Satish Baddipadige Date: Wed, 3 Aug 2016 09:43:59 +0530 Subject: tg3: Fix for diasllow rx coalescing time to be 0 When the rx coalescing time is 0, interrupts are not generated from the controller and rx path hangs. To avoid this rx hang, updating the driver to not allow rx coalescing time to be 0. Signed-off-by: Satish Baddipadige Signed-off-by: Siva Reddy Kallam Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff300f7..f3c6c91 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -14014,6 +14014,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) } if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (!ec->rx_coalesce_usecs) || (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || -- cgit v0.10.2 From 9ce6fd7a81e6f787756be2f4b85f4f7bb3658de3 Mon Sep 17 00:00:00 2001 From: Siva Reddy Kallam Date: Wed, 3 Aug 2016 09:44:00 +0530 Subject: tg3: Report the correct number of RSS queues through tg3_get_rxnfc This patch remove the wrong substraction from info->data in tg3_get_rxnfc function. Without this patch, the number of RSS queues reported is less by one. Reported-by: Michal Soltys Signed-off-by: Siva Reddy Kallam Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index f3c6c91..6592612 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = TG3_RSS_MAX_NUM_QS; } - /* The first interrupt vector only - * handles link interrupts. - */ - info->data -= 1; return 0; default: -- cgit v0.10.2 From 2b10d3ecf2dac737653828889ff85f614318f01a Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Wed, 3 Aug 2016 04:02:02 -0400 Subject: qlcnic: fix data structure corruption in async mbx command handling This patch fixes a data structure corruption bug in the SRIOV VF mailbox handler code. While handling mailbox commands from the atomic context, driver is accessing and updating qlcnic_async_work_list_struct entry fields in the async work list. These fields could be concurrently accessed by the work function resulting in data corruption. This patch restructures async mbx command handling by using a separate async command list instead of using a list of work_struct structures. 
A single work_struct is used to schedule and handle the async commands with proper locking mechanism. Signed-off-by: Rajesh Borundia Signed-off-by: Sony Chacko Signed-off-by: Manish Chopra Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c..24061b9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -156,10 +156,8 @@ struct qlcnic_vf_info { spinlock_t vlan_list_lock; /* Lock for VLAN list */ }; -struct qlcnic_async_work_list { +struct qlcnic_async_cmd { struct list_head list; - struct work_struct work; - void *ptr; struct qlcnic_cmd_args *cmd; }; @@ -168,7 +166,10 @@ struct qlcnic_back_channel { struct workqueue_struct *bc_trans_wq; struct workqueue_struct *bc_async_wq; struct workqueue_struct *bc_flr_wq; - struct list_head async_list; + struct qlcnic_adapter *adapter; + struct list_head async_cmd_list; + struct work_struct vf_async_work; + spinlock_t queue_lock; /* async_cmd_list queue lock */ }; struct qlcnic_sriov { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b72..d710705 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -29,6 +29,7 @@ #define QLC_83XX_VF_RESET_FAIL_THRESH 8 #define QLC_BC_CMD_MAX_RETRY_CNT 5 +static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); @@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) } bc->bc_async_wq = wq; - INIT_LIST_HEAD(&bc->async_list); + INIT_LIST_HEAD(&bc->async_cmd_list); + INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); + spin_lock_init(&bc->queue_lock); + bc->adapter = adapter; for (i = 0; i < num_vfs; i++) { vf = &sriov->vf_info[i]; @@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) { - struct list_head *head = &bc->async_list; - struct qlcnic_async_work_list *entry; + struct list_head *head = &bc->async_cmd_list; + struct qlcnic_async_cmd *entry; flush_workqueue(bc->bc_async_wq); + cancel_work_sync(&bc->vf_async_work); + + spin_lock(&bc->queue_lock); while (!list_empty(head)) { - entry = list_entry(head->next, struct qlcnic_async_work_list, + entry = list_entry(head->next, struct qlcnic_async_cmd, list); - cancel_work_sync(&entry->work); list_del(&entry->list); + kfree(entry->cmd); kfree(entry); } + spin_unlock(&bc->queue_lock); } void qlcnic_sriov_vf_set_multi(struct net_device *netdev) @@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) { - struct qlcnic_async_work_list *entry; - struct qlcnic_adapter *adapter; + struct qlcnic_async_cmd *entry, *tmp; + struct qlcnic_back_channel *bc; struct qlcnic_cmd_args *cmd; + struct list_head *head; + LIST_HEAD(del_list); + + bc = container_of(work, struct qlcnic_back_channel, vf_async_work); + head = &bc->async_cmd_list; + + spin_lock(&bc->queue_lock); + list_splice_init(head, &del_list); + spin_unlock(&bc->queue_lock); + + 
list_for_each_entry_safe(entry, tmp, &del_list, list) { + list_del(&entry->list); + cmd = entry->cmd; + __qlcnic_sriov_issue_cmd(bc->adapter, cmd); + kfree(entry); + } + + if (!list_empty(head)) + queue_work(bc->bc_async_wq, &bc->vf_async_work); - entry = container_of(work, struct qlcnic_async_work_list, work); - adapter = entry->ptr; - cmd = entry->cmd; - __qlcnic_sriov_issue_cmd(adapter, cmd); return; } -static struct qlcnic_async_work_list * -qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) +static struct qlcnic_async_cmd * +qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, + struct qlcnic_cmd_args *cmd) { - struct list_head *node; - struct qlcnic_async_work_list *entry = NULL; - u8 empty = 0; + struct qlcnic_async_cmd *entry = NULL; - list_for_each(node, &bc->async_list) { - entry = list_entry(node, struct qlcnic_async_work_list, list); - if (!work_pending(&entry->work)) { - empty = 1; - break; - } - } + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return NULL; - if (!empty) { - entry = kzalloc(sizeof(struct qlcnic_async_work_list), - GFP_ATOMIC); - if (entry == NULL) - return NULL; - list_add_tail(&entry->list, &bc->async_list); - } + entry->cmd = cmd; + + spin_lock(&bc->queue_lock); + list_add_tail(&entry->list, &bc->async_cmd_list); + spin_unlock(&bc->queue_lock); return entry; } static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, - work_func_t func, void *data, struct qlcnic_cmd_args *cmd) { - struct qlcnic_async_work_list *entry = NULL; + struct qlcnic_async_cmd *entry = NULL; - entry = qlcnic_sriov_get_free_node_async_work(bc); - if (!entry) + entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); + if (!entry) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); return; + } - entry->ptr = data; - entry->cmd = cmd; - INIT_WORK(&entry->work, func); - queue_work(bc->bc_async_wq, &entry->work); + queue_work(bc->bc_async_wq, &bc->vf_async_work); } static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, @@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, if (adapter->need_fw_reset) return -EIO; - qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, - adapter, cmd); + qlcnic_sriov_schedule_async_cmd(bc, cmd); + return 0; } -- cgit v0.10.2 From fc4ca987f7cc0cb7ea8cb8bb673447939a84bb07 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Wed, 3 Aug 2016 04:02:03 -0400 Subject: qlcnic: fix napi budget alteration Driver modifies the supplied NAPI budget in qlcnic_83xx_msix_tx_poll() function. Instead, it should use the budget as it is. Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d..fedd736 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -102,7 +102,6 @@ #define QLCNIC_RESPONSE_DESC 0x05 #define QLCNIC_LRO_DESC 0x12 -#define QLCNIC_TX_POLL_BUDGET 128 #define QLCNIC_TCP_HDR_SIZE 20 #define QLCNIC_TCP_TS_OPTION_SIZE 12 #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) @@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_adapter *adapter; - budget = QLCNIC_TX_POLL_BUDGET; tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); adapter = tx_ring->adapter; work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); -- cgit v0.10.2 From b8b2372de9cc00d5ed667c7b8db29b6cfbf037f5 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Wed, 3 Aug 2016 04:02:04 -0400 Subject: qlcnic: Update version to 5.3.65 Signed-off-by: Manish Chopra Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fd973f4..49bad00 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 64 -#define QLCNIC_LINUX_VERSIONID "5.3.64" +#define _QLCNIC_LINUX_SUBVERSION 65 +#define QLCNIC_LINUX_VERSIONID "5.3.65" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From c45eb4fed12d278d3619f1904885bd0d7bcbf036 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Jul 2016 18:34:45 +0100 Subject: drm/i915/fbdev: Check for the framebuffer before use If the fbdev probing fails, and in our error path we fail to clear the dev_priv->fbdev, then we can try and use a dangling fbdev pointer, and in particular a NULL fb. This could also happen in pathological cases where we try to operate on the fbdev prior to it being probed. 
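The defensive pattern the patch below applies, reduced to a sketch (types and names simplified; the driver checks both ifbdev and ifbdev->fb before any use):

struct framebuffer;
struct fbdev_state { struct framebuffer *fb; };

void fbdev_event_sketch(struct fbdev_state *ifbdev)
{
	/*
	 * Probing may have failed, or may not have run yet, so both the
	 * fbdev object and its framebuffer must be validated before use.
	 */
	if (!ifbdev || !ifbdev->fb)
		return;

	/* ... safe to operate on ifbdev->fb from here on ... */
}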
Reported-by: Maarten Lankhorst Signed-off-by: Chris Wilson Cc: Maarten Lankhorst Cc: Daniel Vetter Cc: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/1468431285-28264-2-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Daniel Vetter Reviewed-by: Mika Kuoppala (cherry picked from commit 6bc265424df02f8162f4a17a37e4982e1c64460e) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 86b00c6..3e3632c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -782,7 +782,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; - if (!ifbdev) + if (!ifbdev || !ifbdev->fb) return; info = ifbdev->helper.fbdev; @@ -827,31 +827,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous void intel_fbdev_output_poll_changed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (dev_priv->fbdev) - drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + + if (ifbdev && ifbdev->fb) + drm_fb_helper_hotplug_event(&ifbdev->helper); } void intel_fbdev_restore_mode(struct drm_device *dev) { - int ret; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_fbdev *ifbdev = dev_priv->fbdev; - struct drm_fb_helper *fb_helper; + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; if (!ifbdev) return; intel_fbdev_sync(ifbdev); + if (!ifbdev->fb) + return; - fb_helper = &ifbdev->helper; - - ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); - if (ret) { + if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) { DRM_DEBUG("failed to restore crtc mode\n"); } else { - mutex_lock(&fb_helper->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); - mutex_unlock(&fb_helper->dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } } -- cgit v0.10.2 From 1bc610e7a17dcf5165f1ed4e0201ee080ba1a0df Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Thu, 4 Aug 2016 11:51:25 +0200 Subject: ASoC: samsung: Fix clock handling in S3C24XX_UDA134X card There is no "pclk" alias in the s3c2440 clk driver for "soc-audio" device so related clk_get() fails, which prevents any operation of the S3C24XX_UDA134X sound card. Instead we get the clock on behalf of the I2S device, i.e. we use the I2S block gate clock which has PCLK is its parent clock. 
Without this patch there is an error like: s3c24xx_uda134x_startup cannot get pclk ASoC: UDA134X startup failed: -2 Signed-off-by: Sylwester Nawrocki Signed-off-by: Mark Brown diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c index 50849e1..92e88bc 100644 --- a/sound/soc/samsung/s3c24xx_uda134x.c +++ b/sound/soc/samsung/s3c24xx_uda134x.c @@ -58,10 +58,12 @@ static struct platform_device *s3c24xx_uda134x_snd_device; static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream) { - int ret = 0; + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; #ifdef ENFORCE_RATES struct snd_pcm_runtime *runtime = substream->runtime; #endif + int ret = 0; mutex_lock(&clk_lock); pr_debug("%s %d\n", __func__, clk_users); @@ -71,8 +73,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream) printk(KERN_ERR "%s cannot get xtal\n", __func__); ret = PTR_ERR(xtal); } else { - pclk = clk_get(&s3c24xx_uda134x_snd_device->dev, - "pclk"); + pclk = clk_get(cpu_dai->dev, "iis"); if (IS_ERR(pclk)) { printk(KERN_ERR "%s cannot get pclk\n", __func__); -- cgit v0.10.2 From d8f7750a08968b105056328652d2c332bdfa062d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 19 May 2016 15:24:55 +0300 Subject: nvmet-rdma: Correctly handle RDMA device hot removal When configuring a device attached listener, we may see device removal events. In this case we return a non-zero return code from the cm event handler which implicitly destroys the cm_id. It is possible that in the future the user will remove this listener and by that trigger a second call to rdma_destroy_id on an already destroyed cm_id -> BUG. In addition, when a queue bound (active session) cm_id generates a DEVICE_REMOVAL event we must guarantee all resources are cleaned up by the time we return from the event handler. Introduce nvmet_rdma_device_removal which addresses (or at least attempts to) both scenarios. Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index e06d504..48c8118 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state { NVMET_RDMA_Q_CONNECTING, NVMET_RDMA_Q_LIVE, NVMET_RDMA_Q_DISCONNECTING, + NVMET_RDMA_IN_DEVICE_REMOVAL, }; struct nvmet_rdma_queue { @@ -984,7 +985,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w) struct nvmet_rdma_device *dev = queue->dev; nvmet_rdma_free_queue(queue); - rdma_destroy_id(cm_id); + + if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL) + rdma_destroy_id(cm_id); + kref_put(&dev->ref, nvmet_rdma_free_dev); } @@ -1233,8 +1237,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) switch (queue->state) { case NVMET_RDMA_Q_CONNECTING: case NVMET_RDMA_Q_LIVE: - disconnect = true; queue->state = NVMET_RDMA_Q_DISCONNECTING; + case NVMET_RDMA_IN_DEVICE_REMOVAL: + disconnect = true; break; case NVMET_RDMA_Q_DISCONNECTING: break; @@ -1272,6 +1277,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, schedule_work(&queue->release_work); } +/** + * nvme_rdma_device_removal() - Handle RDMA device removal + * @queue: nvmet rdma queue (cm id qp_context) + * @addr: nvmet address (cm_id context) + * + * DEVICE_REMOVAL event notifies us that the RDMA device is about + * to unplug so we should take care of destroying our RDMA resources. + * This event will be generated for each allocated cm_id. 
+ * + * Note that this event can be generated on a normal queue cm_id + * and/or a device bound listener cm_id (where in this case + * queue will be null). + * + * we claim ownership on destroying the cm_id. For queues we move + * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for port + * we nullify the priv to prevent double cm_id destruction and destroying + * the cm_id implicitely by returning a non-zero rc to the callout. + */ +static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, + struct nvmet_rdma_queue *queue) +{ + unsigned long flags; + + if (!queue) { + struct nvmet_port *port = cm_id->context; + + /* + * This is a listener cm_id. Make sure that + * future remove_port won't invoke a double + * cm_id destroy. use atomic xchg to make sure + * we don't compete with remove_port. + */ + if (xchg(&port->priv, NULL) != cm_id) + return 0; + } else { + /* + * This is a queue cm_id. Make sure that + * release queue will not destroy the cm_id + * and schedule all ctrl queues removal (only + * if the queue is not disconnecting already). + */ + spin_lock_irqsave(&queue->state_lock, flags); + if (queue->state != NVMET_RDMA_Q_DISCONNECTING) + queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL; + spin_unlock_irqrestore(&queue->state_lock, flags); + nvmet_rdma_queue_disconnect(queue); + flush_scheduled_work(); + } + + /* + * We need to return 1 so that the core will destroy + * it's own ID. What a great API design.. + */ + return 1; +} + static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { @@ -1294,20 +1355,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, break; case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_DISCONNECTED: - case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_TIMEWAIT_EXIT: - /* - * We can get the device removal callback even for a - * CM ID that we aren't actually using. In that case - * the context pointer is NULL, so we shouldn't try - * to disconnect a non-existing queue. But we also - * need to return 1 so that the core will destroy - * it's own ID. What a great API design.. - */ - if (queue) - nvmet_rdma_queue_disconnect(queue); - else - ret = 1; + nvmet_rdma_queue_disconnect(queue); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + ret = nvmet_rdma_device_removal(cm_id, queue); break; case RDMA_CM_EVENT_REJECTED: case RDMA_CM_EVENT_UNREACHABLE: @@ -1396,9 +1448,10 @@ out_destroy_id: static void nvmet_rdma_remove_port(struct nvmet_port *port) { - struct rdma_cm_id *cm_id = port->priv; + struct rdma_cm_id *cm_id = xchg(&port->priv, NULL); - rdma_destroy_id(cm_id); + if (cm_id) + rdma_destroy_id(cm_id); } static struct nvmet_fabrics_ops nvmet_rdma_ops = { -- cgit v0.10.2 From 40e64e07213201710a51e270595d6e6c028f9502 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 28 Jul 2016 18:04:09 +0300 Subject: nvmet-rdma: Don't use the inline buffer in order to avoid allocation for small reads Under extreme conditions this might cause data corruptions. By doing that we we repost the buffer and then post this buffer for the device to send. If we happen to use shared receive queues the device might write to the buffer before it sends it (there is no ordering between send and recv queues). Without SRQs we probably won't get that if the host doesn't mis-behave and send more than we allowed it, but relying on that is not really a good idea. 
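A compact sketch of the rule this patch adopts, with stand-in helpers (the real code calls nvmet_rdma_alloc_sgl() and rdma_rw_ctx_init() as in the diff below): data to be sent must live in its own allocation, never in the inline receive buffer.

struct rsp;

int alloc_private_sgl(struct rsp *r, unsigned int len);	/* dedicated buffer */
int setup_rdma_read(struct rsp *r);			/* pull data into it */

int map_keyed_sgl(struct rsp *r, unsigned int len)
{
	int ret;

	if (!len)
		return 0;

	/*
	 * There is no ordering between the send and (shared) receive
	 * queues, so reusing the inline receive buffer here could let the
	 * device overwrite it before the send referencing it completes.
	 * Always allocate a private SGL instead.
	 */
	ret = alloc_private_sgl(r, len);
	if (ret)
		return ret;

	return setup_rdma_read(r);
}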
Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 48c8118..b4d6485 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -616,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, if (!len) return 0; - /* use the already allocated data buffer if possible */ - if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) { - nvmet_rdma_use_inline_sg(rsp, len, 0); - } else { - status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, - len); - if (status) - return status; - } + status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, + len); + if (status) + return status; ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, -- cgit v0.10.2 From 28b89118539da03f4b188763e1b2fd1aec0f580a Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 4 Aug 2016 11:18:49 +0300 Subject: nvmet: Fix controller serial number inconsistency The host is allowed to issue identify as many times as it wants, we need to stay consistent when reporting the serial number for a given controller. Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 2fac17a..47c564b 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -13,7 +13,6 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include "nvmet.h" @@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl *id; - u64 serial; u16 status = 0; id = kzalloc(sizeof(*id), GFP_KERNEL); @@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->vid = 0; id->ssvid = 0; - /* generate a random serial number as our controllers are ephemeral: */ - get_random_bytes(&serial, sizeof(serial)); memset(id->sn, ' ', sizeof(id->sn)); - snprintf(id->sn, sizeof(id->sn), "%llx", serial); + snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial); memset(id->mn, ' ', sizeof(id->mn)); strncpy((char *)id->mn, "Linux", sizeof(id->mn)); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 8a891ca..6559d5a 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -13,6 +13,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include "nvmet.h" static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; @@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); + /* generate a random serial number as our controllers are ephemeral: */ + get_random_bytes(&ctrl->serial, sizeof(ctrl->serial)); + kref_init(&ctrl->ref); ctrl->subsys = subsys; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 57dd6d8..76b6eed 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -113,6 +113,7 @@ struct nvmet_ctrl { struct mutex lock; u64 cap; + u64 serial; u32 cc; u32 csts; -- cgit v0.10.2 From 3ef1b4b298d98ccb3cc895baf1b18f7f9d073bee Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 4 Aug 2016 13:46:19 +0300 Subject: nvme-rdma: start async event handler after reconnecting to a controller When we reset or reconnect to a controller, we are cancelling the async event handler so we can safely re-establish resources, but we need to 
remember to start it again when we successfully reconnect. Signed-off-by: Sagi Grimberg Reviewed-by: Christoph Hellwig diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f4b83686..e82434a 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -745,6 +745,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) if (ctrl->queue_count > 1) { nvme_start_queues(&ctrl->ctrl); nvme_queue_scan(&ctrl->ctrl); + nvme_queue_async_events(&ctrl->ctrl); } dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); @@ -1747,6 +1748,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) if (ctrl->queue_count > 1) { nvme_start_queues(&ctrl->ctrl); nvme_queue_scan(&ctrl->ctrl); + nvme_queue_async_events(&ctrl->ctrl); } return; -- cgit v0.10.2 From e3266378bdbca82c2854fc612fa9a391eba1f173 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 4 Aug 2016 17:37:40 +0300 Subject: nvme-rdma: Remove unused includes Signed-off-by: Sagi Grimberg Reviewed-by: Steve Wise diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index e82434a..8d2875b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -12,13 +12,11 @@ * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include #include #include #include -#include #include #include #include @@ -26,7 +24,6 @@ #include #include #include -#include #include #include -- cgit v0.10.2 From ca6ac305f017472a172e53345264abdb495eba46 Mon Sep 17 00:00:00 2001 From: John Hsu Date: Thu, 4 Aug 2016 16:52:06 +0800 Subject: ASoC: nau8825: fix bug in playback when suspend In chromium, the following steps will make codec function fail. \1. plug in headphones, Play music \2. run "powerd_dbus_suspend" \3. resume from S3 After resume, the jack detection will restart and make configuration for the headset. Meanwhile, the playback prepares and starts to work. The two sequences will conflict and make wrong register configuration. Originally, the driver adds protection for the case when it finds the playback is active. But the "powerd_dbus_suspend" command will close the pcm stream before suspend. Therefore, the driver can't detect the playback after resume, and the protection not works. For the issue, the driver raises protection every time after resume. The protection will release after jack detection and configuration completes, and then the playback just will goes on. 
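The protection scheme described above, reduced to a sketch. It uses the real kernel semaphore primitives (down_interruptible(), down_timeout(), up()); the struct, field and function names are simplified from the driver, and the jack-detection handler (not shown) is what eventually releases the semaphore taken in resume.

#include <linux/semaphore.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct codec_priv {
	struct semaphore xtalk_sem;
	bool xtalk_protect;
};

/* Resume: always arm the protection; jack detection releases it later. */
static void codec_resume(struct codec_priv *p)
{
	p->xtalk_protect = true;
	if (down_interruptible(&p->xtalk_sem) < 0)
		p->xtalk_protect = false;	/* could not arm the protection */
}

/* DAI configuration: serialize with jack detection / cross-talk work. */
static int codec_hw_params(struct codec_priv *p)
{
	int ret = down_timeout(&p->xtalk_sem, 2 * HZ);

	if (ret < 0)
		return ret;

	/* ... program the I2S/PCM registers here ... */

	up(&p->xtalk_sem);
	return 0;
}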
Signed-off-by: John Hsu Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c index 5c9707a..20d2f15 100644 --- a/sound/soc/codecs/nau8825.c +++ b/sound/soc/codecs/nau8825.c @@ -212,31 +212,6 @@ static const unsigned short logtable[256] = { 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47 }; -static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm); - struct snd_soc_component *component = &codec->component; - struct snd_soc_dai *codec_dai, *_dai; - - list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) { - if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI, - strlen(NUVOTON_CODEC_DAI))) - return codec_dai; - } - return NULL; -} - -static bool nau8825_dai_is_active(struct nau8825 *nau8825) -{ - struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825); - - if (codec_dai) { - if (codec_dai->playback_active || codec_dai->capture_active) - return true; - } - return false; -} - /** * nau8825_sema_acquire - acquire the semaphore of nau88l25 * @nau8825: component to register the codec private data with @@ -1205,6 +1180,8 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream, struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); unsigned int val_len = 0; + nau8825_sema_acquire(nau8825, 2 * HZ); + switch (params_width(params)) { case 16: val_len |= NAU8825_I2S_DL_16; @@ -1225,6 +1202,9 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream, regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1, NAU8825_I2S_DL_MASK, val_len); + /* Release the semaphone. */ + nau8825_sema_release(nau8825); + return 0; } @@ -1234,6 +1214,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); unsigned int ctrl1_val = 0, ctrl2_val = 0; + nau8825_sema_acquire(nau8825, 2 * HZ); + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: ctrl2_val |= NAU8825_I2S_MS_MASTER; @@ -1282,6 +1264,9 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2, NAU8825_I2S_MS_MASK, ctrl2_val); + /* Release the semaphone. */ + nau8825_sema_release(nau8825); + return 0; } @@ -2241,20 +2226,8 @@ static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec) regcache_cache_only(nau8825->regmap, false); regcache_sync(nau8825->regmap); - if (nau8825_is_jack_inserted(nau8825->regmap)) { - /* If the jack is inserted, we need to check whether the play- - * back is active before suspend. If active, the driver has to - * raise the protection for cross talk function to avoid the - * playback recovers before cross talk process finish. Other- - * wise, the playback will be interfered by cross talk func- - * tion. It is better to apply hardware related parameters - * before starting playback or record. - */ - if (nau8825_dai_is_active(nau8825)) { - nau8825->xtalk_protect = true; - nau8825_sema_acquire(nau8825, 0); - } - } + nau8825->xtalk_protect = true; + nau8825_sema_acquire(nau8825, 0); enable_irq(nau8825->irq); return 0; -- cgit v0.10.2 From 06746c672cdd60715ee57ab1aced95a9e536fd4d Mon Sep 17 00:00:00 2001 From: John Hsu Date: Thu, 4 Aug 2016 16:52:07 +0800 Subject: ASoC: nau8825: fix static check error about semaphone control The patch is to fix the static check error as the following. 
The patch commit b50455fab459 ("ASoC: nau8825: cross talk suppression measurement function") from Jun 7, 2016, leads to the following static checker warning: sound/soc/codecs/nau8825.c:265 nau8825_sema_acquire() warn: 'sem:&nau8825->xtalk_sem' is sometimes locked here and sometimes unlocked. The semaphone acquire function has return value, and some callers can do error handling when lock fails. Signed-off-by: John Hsu Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c index 20d2f15..2e59a85 100644 --- a/sound/soc/codecs/nau8825.c +++ b/sound/soc/codecs/nau8825.c @@ -225,19 +225,26 @@ static const unsigned short logtable[256] = { * Acquires the semaphore without jiffies. If no more tasks are allowed * to acquire the semaphore, calling this function will put the task to * sleep until the semaphore is released. - * It returns if the semaphore was acquired. + * If the semaphore is not released within the specified number of jiffies, + * this function returns -ETIME. + * If the sleep is interrupted by a signal, this function will return -EINTR. + * It returns 0 if the semaphore was acquired successfully. */ -static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout) +static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout) { int ret; - if (timeout) + if (timeout) { ret = down_timeout(&nau8825->xtalk_sem, timeout); - else + if (ret < 0) + dev_warn(nau8825->dev, "Acquire semaphone timeout\n"); + } else { ret = down_interruptible(&nau8825->xtalk_sem); + if (ret < 0) + dev_warn(nau8825->dev, "Acquire semaphone fail\n"); + } - if (ret < 0) - dev_warn(nau8825->dev, "Acquire semaphone fail\n"); + return ret; } /** @@ -1596,8 +1603,11 @@ static irqreturn_t nau8825_interrupt(int irq, void *data) * cess and restore changes if process * is ongoing when ejection. */ + int ret; nau8825->xtalk_protect = true; - nau8825_sema_acquire(nau8825, 0); + ret = nau8825_sema_acquire(nau8825, 0); + if (ret < 0) + nau8825->xtalk_protect = false; } /* Startup cross talk detection process */ nau8825->xtalk_state = NAU8825_XTALK_PREPARE; @@ -2223,11 +2233,14 @@ static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec) static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec) { struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); + int ret; regcache_cache_only(nau8825->regmap, false); regcache_sync(nau8825->regmap); nau8825->xtalk_protect = true; - nau8825_sema_acquire(nau8825, 0); + ret = nau8825_sema_acquire(nau8825, 0); + if (ret < 0) + nau8825->xtalk_protect = false; enable_irq(nau8825->irq); return 0; -- cgit v0.10.2 From 5d764912a0ee6db83e962c1501b5b9e58ba14e15 Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Thu, 4 Aug 2016 15:35:37 +0100 Subject: ASoC: da7213: Default to 64 BCLKs per WCLK to support all formats Previously code defaulted to 32 BCLKS per WCLK which meant 24 and 32 bit DAI formats would not work properly. This patch fixes the issue by defaulting to 64 BCLKs per WCLK. 
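The underlying arithmetic: one WCLK (frame) period has to carry one slot per channel, so stereo with a 32-bit slot width needs 2 x 32 = 64 BCLKs, while the old default of 32 only covers 2 x 16. A trivial helper makes the relation explicit (illustrative only, not a driver API):

/* Bit clocks required per WCLK frame (illustrative only). */
static inline unsigned int bclks_per_wclk(unsigned int channels,
					  unsigned int slot_width)
{
	return channels * slot_width;	/* 2 x 16 = 32, 2 x 32 = 64 */
}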
Signed-off-by: Adam Thomson Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c index e5527bc..bcf1834 100644 --- a/sound/soc/codecs/da7213.c +++ b/sound/soc/codecs/da7213.c @@ -1247,8 +1247,8 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) return -EINVAL; } - /* By default only 32 BCLK per WCLK is supported */ - dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_32; + /* By default only 64 BCLK per WCLK is supported */ + dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_64; snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode); snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK, -- cgit v0.10.2 From ea0a95d7f162bfa1c9df74471f0064f71cdf80ea Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 25 Jul 2016 07:00:33 +0000 Subject: fcoe: Use kfree_skb() instead of kfree() Use kfree_skb() instead of kfree() to free sk_buff. Signed-off-by: Wei Yongjun Acked-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index a569c65..dcf3653 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) mutex_unlock(&fip->ctlr_mutex); drop: - kfree(skb); + kfree_skb(skb); return rc; } -- cgit v0.10.2 From 6b07d9ca9b5363dda959b9582a3fc9c0b89ef3b5 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 2 Aug 2016 11:13:41 +0200 Subject: mac80211: fix purging multicast PS buffer queue The code currently assumes that buffered multicast PS frames don't have a pending ACK frame for tx status reporting. However, hostapd sends a broadcast deauth frame on teardown for which tx status is requested. This can lead to the "Have pending ack frames" warning on module reload. Fix this by using ieee80211_free_txskb/ieee80211_purge_tx_queue. 
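The distinction being fixed: skbs handed to mac80211 for transmission may carry tx-status state (such as a pending ACK for status reporting), so they must be released through ieee80211_free_txskb() rather than a plain dev_kfree_skb(); ieee80211_purge_tx_queue() is the queue-wide form. A minimal sketch of the per-queue cleanup, assuming only skb_dequeue() and the exported ieee80211_free_txskb():

#include <linux/skbuff.h>
#include <net/mac80211.h>

/*
 * Drop every frame buffered for transmission, letting mac80211 release
 * any tx-status/ACK bookkeeping attached to each skb.
 */
static void purge_buffered_frames(struct ieee80211_hw *hw,
				  struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(queue)) != NULL)
		ieee80211_free_txskb(hw, skb);	/* not dev_kfree_skb() */
}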
Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 47e99ab8..543b1d4 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) /* free all potentially still buffered bcast frames */ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); - skb_queue_purge(&sdata->u.ap.ps.bc_buf); + ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); mutex_lock(&local->mtx); ieee80211_vif_copy_chanctx_to_vlans(sdata, true); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 91461c4..5023966 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) skb = skb_dequeue(&ps->bc_buf); if (skb) { purged++; - dev_kfree_skb(skb); + ieee80211_free_txskb(&local->hw, skb); } total += skb_queue_len(&ps->bc_buf); } @@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { ps_dbg(tx->sdata, "BC TX buffer full - dropping the oldest frame\n"); - dev_kfree_skb(skb_dequeue(&ps->bc_buf)); + ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); } else tx->local->total_ps_buffered++; @@ -4275,7 +4275,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) break; - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } info = IEEE80211_SKB_CB(skb); -- cgit v0.10.2 From 71f2c3470fca51be27f6fc5975675f7e1c3b91e5 Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Tue, 2 Aug 2016 17:16:57 +0900 Subject: mac80211: End the MPSP even if EOSP frame was not acked If QoS frame with EOSP (end of service period) subfield=1 sent by local peer was not acked by remote peer, local peer did not end the MPSP. This prevents local peer from going to DOZE state. And if the remote peer goes away without closing connection, local peer continues AWAKE state and wastes battery. 
Signed-off-by: Masashi Honma Acked-by: Bob Copeland Signed-off-by: Johannes Berg diff --git a/net/mac80211/status.c b/net/mac80211/status.c index c6d5c72..a2a6826 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) clear_sta_flag(sta, WLAN_STA_SP); acked = !!(info->flags & IEEE80211_TX_STAT_ACK); + + /* mesh Peer Service Period support */ + if (ieee80211_vif_is_mesh(&sta->sdata->vif) && + ieee80211_is_data_qos(fc)) + ieee80211_mpsp_trigger_process( + ieee80211_get_qos_ctl(hdr), sta, true, acked); + if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) { /* * The STA is in power save mode, so assume @@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) return; } - /* mesh Peer Service Period support */ - if (ieee80211_vif_is_mesh(&sta->sdata->vif) && - ieee80211_is_data_qos(fc)) - ieee80211_mpsp_trigger_process( - ieee80211_get_qos_ctl(hdr), - sta, true, acked); - if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && (ieee80211_is_data(hdr->frame_control)) && (rates_idx != -1)) -- cgit v0.10.2 From 9757235f451c27deaa88925399f070ff6fcea832 Mon Sep 17 00:00:00 2001 From: Masashi Honma Date: Wed, 3 Aug 2016 10:07:44 +0900 Subject: nl80211: correct checks for NL80211_MESHCONF_HT_OPMODE value Previously, NL80211_MESHCONF_HT_OPMODE validation rejected correct flag combinations, e.g. IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT. Doing just a range-check allows setting flags that don't exist (0x8) and invalid flag combinations. Implements some checks based on IEEE 802.11 2012 8.4.2.59 "HT Operation element". Signed-off-by: Masashi Honma [reword commit message, simplify a bit] Signed-off-by: Johannes Berg diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 46417f9..f02653a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5380,6 +5380,7 @@ static int nl80211_parse_mesh_config(struct genl_info *info, { struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; u32 mask = 0; + u16 ht_opmode; #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ do { \ @@ -5471,9 +5472,36 @@ do { \ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, mask, NL80211_MESHCONF_RSSI_THRESHOLD, nl80211_check_s32); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, - mask, NL80211_MESHCONF_HT_OPMODE, - nl80211_check_u16); + /* + * Check HT operation mode based on + * IEEE 802.11 2012 8.4.2.59 HT Operation element. 
+ */ + if (tb[NL80211_MESHCONF_HT_OPMODE]) { + ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); + + if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION | + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | + IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) + return -EINVAL; + + if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && + (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) + return -EINVAL; + + switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONE: + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) + return -EINVAL; + break; + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) + return -EINVAL; + break; + } + cfg->ht_opmode = ht_opmode; + } FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, 1, 65535, mask, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, -- cgit v0.10.2 From 2439ca0402091badb24415e1b073ba12b34ba423 Mon Sep 17 00:00:00 2001 From: Maxim Altshul Date: Thu, 4 Aug 2016 15:43:04 +0300 Subject: mac80211: Add ieee80211_hw pointer to get_expected_throughput The variable is added to allow the driver an easy access to it's own hw->priv when the op is invoked. This fixes a crash in wlcore because it was relying on a station pointer that wasn't initialized yet. It's the wrong way to fix the crash, but it solves the problem for now and it does make sense to have the hw pointer here. Signed-off-by: Maxim Altshul [rewrite commit message, fix indentation] Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 1d68916..9e1f2d9 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5700,10 +5700,11 @@ out: mutex_unlock(&wl->mutex); } -static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) +static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) { struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; - struct wl1271 *wl = wl_sta->wl; + struct wl1271 *wl = hw->priv; u8 hlid = wl_sta->hlid; /* return in units of Kbps */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b4faadb..cca510a 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3620,7 +3620,8 @@ struct ieee80211_ops { int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); - u32 (*get_expected_throughput)(struct ieee80211_sta *sta); + u32 (*get_expected_throughput)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta); int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm); diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 184473c..ba5fc1f 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, trace_drv_get_expected_throughput(sta); if (local->ops->get_expected_throughput) - ret = local->ops->get_expected_throughput(sta); + ret = local->ops->get_expected_throughput(&local->hw, sta); trace_drv_return_u32(local, ret); return ret; -- cgit v0.10.2 From 9130b8dbc6ac20f2dc5846e1647f5b60eafab6e3 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 3 Aug 2016 20:19:48 -0400 Subject: SUNRPC: allow for upcalls for same uid but different gss service 
It's possible to have simultaneous upcalls for the same UIDs but different GSS service. In that case, we need to allow for the upcall to gssd to proceed so that not the same context is used by two different GSS services. Some servers lock the use of context to the GSS service. Signed-off-by: Olga Kornievskaia Cc: stable@vger.kernel.org # v3.9+ Signed-off-by: Trond Myklebust diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 23c8e7c..976c781 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) } static struct gss_upcall_msg * -__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) +__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth) { struct gss_upcall_msg *pos; list_for_each_entry(pos, &pipe->in_downcall, list) { if (!uid_eq(pos->uid, uid)) continue; + if (auth && pos->auth->service != auth->service) + continue; atomic_inc(&pos->count); dprintk("RPC: %s found msg %p\n", __func__, pos); return pos; @@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg) struct gss_upcall_msg *old; spin_lock(&pipe->lock); - old = __gss_find_upcall(pipe, gss_msg->uid); + old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth); if (old == NULL) { atomic_inc(&gss_msg->count); list_add(&gss_msg->list, &pipe->in_downcall); @@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = -ENOENT; /* Find a matching upcall */ spin_lock(&pipe->lock); - gss_msg = __gss_find_upcall(pipe, uid); + gss_msg = __gss_find_upcall(pipe, uid, NULL); if (gss_msg == NULL) { spin_unlock(&pipe->lock); goto err_put_ctx; -- cgit v0.10.2 From d88e4d82efc7aca47f1e31808717c9718e466d93 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 4 Aug 2016 16:24:28 +1000 Subject: SUNRPC: disable the use of IPv6 temporary addresses. If the net.ipv6.conf.*.use_temp_addr sysctl is set to '2', then TCP connections over IPv6 will prefer a 'private' source address. These eventually expire and become invalid, typically after a week, but the time is configurable. When the local address becomes invalid the client will not be able to receive replies from the server. Eventually the connection will timeout or break and a new connection will be established, but this can take half an hour (typically TCP connection break time). RFC 4941, which describes private IPv6 addresses, acknowledges that some applications might not work well with them and that the application may explicitly a request non-temporary (i.e. "public") address. I believe this is correct for SUNRPC clients. Without this change, a client will occasionally experience a long delay if private addresses have been enabled. The privacy offered by private addresses is of little value for an NFS server which requires client authentication. For NFSv3 this will often not be a problem because idle connections are closed after 5 minutes. For NFSv4 connections never go idle due to the period RENEW (or equivalent) request. 
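The RFC 5014 preference the patch sets on the kernel socket is also
available to ordinary userspace sockets. A minimal sketch (the fallback
defines mirror the Linux uapi values in case the libc headers are too
old to provide them):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <stdio.h>

	#ifndef IPV6_ADDR_PREFERENCES
	#define IPV6_ADDR_PREFERENCES	72
	#endif
	#ifndef IPV6_PREFER_SRC_PUBLIC
	#define IPV6_PREFER_SRC_PUBLIC	0x0002
	#endif

	int main(void)
	{
		int pref = IPV6_PREFER_SRC_PUBLIC;
		int fd = socket(AF_INET6, SOCK_STREAM, 0);

		/* Ask for a public (non-temporary) IPv6 source address. */
		if (fd < 0 ||
		    setsockopt(fd, IPPROTO_IPV6, IPV6_ADDR_PREFERENCES,
			       &pref, sizeof(pref)) < 0)
			perror("prefer public source address");
		return 0;
	}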
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c6b1d48..bf79212 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2236,6 +2236,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) unsigned int keepcnt = xprt->timeout->to_retries + 1; unsigned int opt_on = 1; unsigned int timeo; + unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC; /* TCP Keepalive options */ kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, @@ -2247,6 +2248,16 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keepcnt, sizeof(keepcnt)); + /* Avoid temporary address, they are bad for long-lived + * connections such as NFS mounts. + * RFC4941, section 3.6 suggests that: + * Individual applications, which have specific + * knowledge about the normal duration of connections, + * MAY override this as appropriate. + */ + kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES, + (char *)&addr_pref, sizeof(addr_pref)); + /* TCP user timeout (see RFC5482) */ timeo = jiffies_to_msecs(xprt->timeout->to_initval) * (xprt->timeout->to_retries + 1); -- cgit v0.10.2 From 206b3bb57481758b5fe98e90f47abb0f3fc0954e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 5 Aug 2016 12:16:19 -0400 Subject: NFSv4.2: LAYOUTSTATS may return NFS4ERR_ADMIN/DELEG_REVOKED We should handle those errors in the same way we handle the other stateid errors: by invalidating the faulty layout stateid. Signed-off-by: Trond Myklebust diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 33da841..6f47527 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -338,6 +338,8 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata) case 0: break; case -NFS4ERR_EXPIRED: + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: -- cgit v0.10.2 From 02910177aede34d6f49e2dc14b1c5c6cd468d94f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 4 Aug 2016 00:00:33 -0400 Subject: SUNRPC: Fix reconnection timeouts When the connect attempt fails and backs off, we should start the clock at the last connection attempt, not time at which we queue up the reconnect job. 
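Put differently, a simplified sketch of the computation the patch below
introduces as xs_reconnect_delay(): the next attempt is due
reestablish_timeout after the previous attempt started, so only the
unexpired part of that window is used as the queueing delay. A plain
wrap-safe comparison stands in for the kernel's time_after() here.

	static unsigned long reconnect_delay(unsigned long connect_start,
					     unsigned long reestablish_timeout,
					     unsigned long now)
	{
		unsigned long deadline = connect_start + reestablish_timeout;

		/* 0 means the window has already expired: retry now. */
		return (long)(deadline - now) > 0 ? deadline - now : 0;
	}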
Signed-off-by: Trond Myklebust diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index bf79212..04b0c41 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2173,6 +2173,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) write_unlock_bh(&sk->sk_callback_lock); } xs_udp_do_set_buffer_size(xprt); + + xprt->stat.connect_start = jiffies; } static void xs_udp_setup_socket(struct work_struct *work) @@ -2384,6 +2386,16 @@ out: xprt_wake_pending_tasks(xprt, status); } +static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt) +{ + unsigned long start, now = jiffies; + + start = xprt->stat.connect_start + xprt->reestablish_timeout; + if (time_after(start, now)) + return start - now; + return 0; +} + /** * xs_connect - connect a socket to a remote endpoint * @xprt: pointer to transport structure @@ -2401,6 +2413,7 @@ out: static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + unsigned long delay = 0; WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); @@ -2412,19 +2425,19 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) /* Start by resetting any existing state */ xs_reset_transport(transport); - queue_delayed_work(xprtiod_workqueue, - &transport->connect_worker, - xprt->reestablish_timeout); + delay = xs_reconnect_delay(xprt); + xprt->reestablish_timeout <<= 1; if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; - } else { + } else dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - queue_delayed_work(xprtiod_workqueue, - &transport->connect_worker, 0); - } + + queue_delayed_work(xprtiod_workqueue, + &transport->connect_worker, + delay); } /** -- cgit v0.10.2 From 34b58355ad1d9987267f071265a7de6c8e00662a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 5 Aug 2016 18:36:10 +0900 Subject: drm/ttm: Wait for a BO to become idle before unbinding it from GTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes hangs under memory pressure, e.g. running the piglit test tex3d-maxsize concurrently with other tests. 
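The mechanism behind the hang, sketched in simplified form from the
ttm_bo_move_ttm() change below: the GPU may still be accessing the
buffer through its GTT binding, so the move has to wait for the BO to
become idle before the pages are unbound, and a busy or interrupted
wait must leave the binding in place.

	/* Simplified from the change below: wait before unbinding. */
	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;	/* busy or signalled: keep the GTT binding */

	ttm_tt_unbind(ttm);
	ttm_bo_free_old_node(bo);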
Fixes: 17d33bc9d6ef ("drm/ttm: drop waiting for idle in ttm_bo_evict.") Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b7742e6..9b61c8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -335,7 +335,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -368,7 +368,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 528bdef..6190035 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem); if (ret) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index ffdad81..0c00e19 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4054d80..42c074a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -354,7 +354,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu, + mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 2df602a..f157a9e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, + bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; @@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct 
ttm_buffer_object *bo, int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + pr_err("Failed to expire sync object before unbinding TTM\n"); + return ret; + } + ttm_tt_unbind(ttm); ttm_bo_free_old_node(bo); ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 4348d6d..99c6d01 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -962,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, * * @bo: A pointer to a struct ttm_buffer_object. * @evict: 1: This is an eviction. Don't try to pipeline. + * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. * @new_mem: struct ttm_mem_reg indicating where to move. * @@ -976,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, */ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem); /** -- cgit v0.10.2 From 3851f1cdb2b8d507b10395fc110d4c37d6121285 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 4 Aug 2016 00:08:45 -0400 Subject: SUNRPC: Limit the reconnect backoff timer to the max RPC message timeout ...and ensure that we propagate it to new transports on the same client. Signed-off-by: Trond Myklebust diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 5e3e1b6..a16070d 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -218,7 +218,8 @@ struct rpc_xprt { struct work_struct task_cleanup; struct timer_list timer; unsigned long last_used, - idle_timeout; + idle_timeout, + max_reconnect_timeout; /* * Send stuff diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cb49898..faac547 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2638,6 +2638,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; + unsigned long reconnect_timeout; unsigned char resvport; int ret = 0; @@ -2649,6 +2650,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, return -EAGAIN; } resvport = xprt->resvport; + reconnect_timeout = xprt->max_reconnect_timeout; rcu_read_unlock(); xprt = xprt_create_transport(xprtargs); @@ -2657,6 +2659,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, goto out_put_switch; } xprt->resvport = resvport; + xprt->max_reconnect_timeout = reconnect_timeout; rpc_xprt_switch_set_roundrobin(xps); if (setup) { diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 04b0c41..8ede3bc 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -177,7 +177,6 @@ static struct ctl_table sunrpc_table[] = { * increase over time if the server is down or not responding. 
*/ #define XS_TCP_INIT_REEST_TO (3U * HZ) -#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) /* * TCP idle timeout; client drops the transport socket if it is idle @@ -2396,6 +2395,15 @@ static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt) return 0; } +static void xs_reconnect_backoff(struct rpc_xprt *xprt) +{ + xprt->reestablish_timeout <<= 1; + if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) + xprt->reestablish_timeout = xprt->max_reconnect_timeout; + if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; +} + /** * xs_connect - connect a socket to a remote endpoint * @xprt: pointer to transport structure @@ -2426,12 +2434,8 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) xs_reset_transport(transport); delay = xs_reconnect_delay(xprt); + xs_reconnect_backoff(xprt); - xprt->reestablish_timeout <<= 1; - if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) - xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; - if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) - xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; } else dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); @@ -2989,6 +2993,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) xprt->ops = &xs_tcp_ops; xprt->timeout = &xs_tcp_default_timeout; + xprt->max_reconnect_timeout = xprt->timeout->to_maxval; + INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); -- cgit v0.10.2 From fb10fb67ad2ce43d5e5b8ad22d2ba826844acc56 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 5 Aug 2016 19:13:08 -0400 Subject: NFSv4: Cleanup the setting of the nfs4 lease period Make a helper function nfs4_set_lease_period() and have nfs41_setup_state_renewal() and nfs4_do_fsinfo() use it. 
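One detail worth noting about the conversion (an observation, not part
of the patch text): both callers sample the timestamp before issuing
the RPC that fetches the lease time, so the recorded renewal time can
only err on the early, safe side.

	/* Usage sketch, mirroring the callers below. */
	unsigned long now = jiffies;

	status = nfs4_proc_get_lease_time(clp, &fsinfo);
	if (status == 0)
		nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);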
Signed-off-by: Trond Myklebust diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 4be567a..74a0e34 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -395,6 +395,10 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *); extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); extern void nfs4_kill_renewd(struct nfs_client *); extern void nfs4_renew_state(struct work_struct *); +extern void nfs4_set_lease_period(struct nfs_client *clp, + unsigned long lease, + unsigned long lastrenewed); + /* nfs4state.c */ struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index da5c9e5..b9e1867 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4237,12 +4237,9 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str err = _nfs4_do_fsinfo(server, fhandle, fsinfo); trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); if (err == 0) { - struct nfs_client *clp = server->nfs_client; - - spin_lock(&clp->cl_lock); - clp->cl_lease_time = fsinfo->lease_time * HZ; - clp->cl_last_renewal = now; - spin_unlock(&clp->cl_lock); + nfs4_set_lease_period(server->nfs_client, + fsinfo->lease_time * HZ, + now); break; } err = nfs4_handle_exception(server, err, &exception); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index e1ba58c..062029a 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -136,6 +136,23 @@ nfs4_kill_renewd(struct nfs_client *clp) cancel_delayed_work_sync(&clp->cl_renewd); } +/** + * nfs4_set_lease_period - Sets the lease period on a nfs_client + * + * @clp: pointer to nfs_client + * @lease: new value for lease period + * @lastrenewed: time at which lease was last renewed + */ +void nfs4_set_lease_period(struct nfs_client *clp, + unsigned long lease, + unsigned long lastrenewed) +{ + spin_lock(&clp->cl_lock); + clp->cl_lease_time = lease; + clp->cl_last_renewal = lastrenewed; + spin_unlock(&clp->cl_lock); +} + /* * Local variables: * c-basic-offset: 8 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 834b875..cada00a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -277,20 +277,17 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp) { int status; struct nfs_fsinfo fsinfo; + unsigned long now; if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { nfs4_schedule_state_renewal(clp); return 0; } + now = jiffies; status = nfs4_proc_get_lease_time(clp, &fsinfo); if (status == 0) { - /* Update lease time and schedule renewal */ - spin_lock(&clp->cl_lock); - clp->cl_lease_time = fsinfo.lease_time * HZ; - clp->cl_last_renewal = jiffies; - spin_unlock(&clp->cl_lock); - + nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now); nfs4_schedule_state_renewal(clp); } -- cgit v0.10.2 From 8d480326c3d6921ff5f1cc988c993bd572248deb Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 5 Aug 2016 19:03:31 -0400 Subject: NFSv4: Cap the transport reconnection timer at 1/2 lease period We don't want to miss a lease period renewal due to the TCP connection failing to reconnect in a timely fashion. To ensure this doesn't happen, cap the reconnection timer so that we retry the connection attempt at least every 1/2 lease period. 
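A worked example with illustrative numbers (not taken from the patch):
with the common 90-second NFSv4 lease, the cap becomes lease >> 1 = 45
seconds, so even a fully backed-off transport attempts to reconnect at
least twice per lease period.

	/* Illustrative values only. */
	unsigned long lease = 90 * HZ;	/* from GET_LEASE_TIME / FSINFO */
	unsigned long cap = lease >> 1;	/* fed to rpc_cap_max_reconnect_timeout() */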
Signed-off-by: Trond Myklebust diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 062029a..82e7719 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -151,6 +151,9 @@ void nfs4_set_lease_period(struct nfs_client *clp, clp->cl_lease_time = lease; clp->cl_last_renewal = lastrenewed; spin_unlock(&clp->cl_lock); + + /* Cap maximum reconnect timeout at 1/2 lease period */ + rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1); } /* diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index b6810c9..5c02b06 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -195,6 +195,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, struct rpc_xprt *, void *), void *data); +void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, + unsigned long timeo); const char *rpc_proc_name(const struct rpc_task *task); #endif /* __KERNEL__ */ diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index faac547..7f79fb7 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2676,6 +2676,27 @@ out_put_switch: } EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); +static int +rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt, + struct rpc_xprt *xprt, + void *data) +{ + unsigned long timeout = *((unsigned long *)data); + + if (timeout < xprt->max_reconnect_timeout) + xprt->max_reconnect_timeout = timeout; + return 0; +} + +void +rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo) +{ + rpc_clnt_iterate_for_each_xprt(clnt, + rpc_xprt_cap_max_reconnect_timeout, + &timeo); +} +EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static void rpc_show_header(void) { -- cgit v0.10.2 From 5ef9f289c4e698054e5687edb54f0da3cdc9173a Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 3 Aug 2016 15:44:57 +1000 Subject: OVS: Ignore negative headroom value net_device->ndo_set_rx_headroom (introduced in 871b642adebe300be2e50aa5f65a418510f636ec) says "Setting a negtaive value reset the rx headroom to the default value". It seems that the OVS implementation in 3a927bc7cf9d0fbe8f4a8189dd5f8440228f64e7 overlooked this and sets dev->needed_headroom unconditionally. This doesn't have an immediate effect, but can mess up later LL_RESERVED_SPACE calculations, such as done in net/ipv6/mcast.c:mld_newpack. For reference, this issue was found from a skb_panic raised there after the length calculations had given the wrong result. Note the other current users of this interface (drivers/net/tun.c:tun_set_headroom and drivers/net/veth.c:veth_set_rx_headroom) are both checking this correctly thus need no modification. Thanks to Ben for some pointers from the crash dumps! Cc: Benjamin Poirier Cc: Paolo Abeni Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1361414 Signed-off-by: Ian Wienand Signed-off-by: David S. Miller diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 434e04c..95c3614 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) static void internal_set_rx_headroom(struct net_device *dev, int new_hr) { - dev->needed_headroom = new_hr; + dev->needed_headroom = new_hr < 0 ? 
0 : new_hr; } static const struct net_device_ops internal_dev_netdev_ops = { -- cgit v0.10.2 From 54447f1ad73414ebb052e9a33d079cabed3a03e8 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 3 Aug 2016 10:58:35 +0000 Subject: net: arc_emac: add missing of_node_put() in arc_emac_probe() commit a94efbd7cc45 ("ethernet: arc: emac_main: add missing of_node_put after calling of_parse_phandle") added missing of_node_put after calling of_parse_phandle, but missing the devm_ioremap_resource() error handling case. Signed-off-by: Wei Yongjun Reviewed-by: Peter Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4bff0f3..b0da969 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) priv->dev = dev; priv->regs = devm_ioremap_resource(dev, &res_regs); - if (IS_ERR(priv->regs)) - return PTR_ERR(priv->regs); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out_put_node; + } dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); -- cgit v0.10.2 From 372ee16386bbf6dc5eeb0387e1ede963debba82a Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 3 Aug 2016 14:11:40 +0100 Subject: rxrpc: Fix races between skb free, ACK generation and replying Inside the kafs filesystem it is possible to occasionally have a call processed and terminated before we've had a chance to check whether we need to clean up the rx queue for that call because afs_send_simple_reply() ends the call when it is done, but this is done in a workqueue item that might happen to run to completion before afs_deliver_to_call() completes. Further, it is possible for rxrpc_kernel_send_data() to be called to send a reply before the last request-phase data skb is released. The rxrpc skb destructor is where the ACK processing is done and the call state is advanced upon release of the last skb. ACK generation is also deferred to a work item because it's possible that the skb destructor is not called in a context where kernel_sendmsg() can be invoked. To this end, the following changes are made: (1) kernel_rxrpc_data_consumed() is added. This should be called whenever an skb is emptied so as to crank the ACK and call states. This does not release the skb, however. kernel_rxrpc_free_skb() must now be called to achieve that. These together replace rxrpc_kernel_data_delivered(). (2) kernel_rxrpc_data_consumed() is wrapped by afs_data_consumed(). This makes afs_deliver_to_call() easier to work as the skb can simply be discarded unconditionally here without trying to work out what the return value of the ->deliver() function means. The ->deliver() functions can, via afs_data_complete(), afs_transfer_reply() and afs_extract_data() mark that an skb has been consumed (thereby cranking the state) without the need to conditionally free the skb to make sure the state is correct on an incoming call for when the call processor tries to send the reply. (3) rxrpc_recvmsg() now has to call kernel_rxrpc_data_consumed() when it has finished with a packet and MSG_PEEK isn't set. (4) rxrpc_packet_destructor() no longer calls rxrpc_hard_ACK_data(). Because of this, we no longer need to clear the destructor and put the call before we free the skb in cases where we don't want the ACK/call state to be cranked. 
(5) The ->deliver() call-type callbacks are made to return -EAGAIN rather than 0 if they expect more data (afs_extract_data() returns -EAGAIN to the delivery function already), and the caller is now responsible for producing an abort if that was the last packet. (6) There are many bits of unmarshalling code where: ret = afs_extract_data(call, skb, last, ...); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } is to be found. As -EAGAIN can now be passed back to the caller, we now just return if ret < 0: ret = afs_extract_data(call, skb, last, ...); if (ret < 0) return ret; (7) Checks for trailing data and empty final data packets has been consolidated as afs_data_complete(). So: if (skb->len > 0) return -EBADMSG; if (!last) return 0; becomes: ret = afs_data_complete(call, skb, last); if (ret < 0) return ret; (8) afs_transfer_reply() now checks the amount of data it has against the amount of data desired and the amount of data in the skb and returns an error to induce an abort if we don't get exactly what we want. Without these changes, the following oops can occasionally be observed, particularly if some printks are inserted into the delivery path: general protection fault: 0000 [#1] SMP Modules linked in: kafs(E) af_rxrpc(E) [last unloaded: af_rxrpc] CPU: 0 PID: 1305 Comm: kworker/u8:3 Tainted: G E 4.7.0-fsdevel+ #1303 Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014 Workqueue: kafsd afs_async_workfn [kafs] task: ffff88040be041c0 ti: ffff88040c070000 task.ti: ffff88040c070000 RIP: 0010:[] [] __lock_acquire+0xcf/0x15a1 RSP: 0018:ffff88040c073bc0 EFLAGS: 00010002 RAX: 6b6b6b6b6b6b6b6b RBX: 0000000000000000 RCX: ffff88040d29a710 RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88040d29a710 RBP: ffff88040c073c70 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: ffff88040be041c0 R15: ffffffff814c928f FS: 0000000000000000(0000) GS:ffff88041fa00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fa4595f4750 CR3: 0000000001c14000 CR4: 00000000001406f0 Stack: 0000000000000006 000000000be04930 0000000000000000 ffff880400000000 ffff880400000000 ffffffff8108f847 ffff88040be041c0 ffffffff81050446 ffff8803fc08a920 ffff8803fc08a958 ffff88040be041c0 ffff88040c073c38 Call Trace: [] ? mark_held_locks+0x5e/0x74 [] ? __local_bh_enable_ip+0x9b/0xa1 [] ? trace_hardirqs_on_caller+0x16d/0x189 [] lock_acquire+0x122/0x1b6 [] ? lock_acquire+0x122/0x1b6 [] ? skb_dequeue+0x18/0x61 [] _raw_spin_lock_irqsave+0x35/0x49 [] ? skb_dequeue+0x18/0x61 [] skb_dequeue+0x18/0x61 [] afs_deliver_to_call+0x344/0x39d [kafs] [] afs_process_async_call+0x4c/0xd5 [kafs] [] afs_async_workfn+0xe/0x10 [kafs] [] process_one_work+0x29d/0x57c [] worker_thread+0x24a/0x385 [] ? rescuer_thread+0x2d0/0x2d0 [] kthread+0xf3/0xfb [] ret_from_fork+0x1f/0x40 [] ? kthread_create_on_node+0x1cf/0x1cf Signed-off-by: David Howells Signed-off-by: David S. Miller diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index 16a924c..70c926a 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -790,13 +790,12 @@ The kernel interface functions are as follows: Data messages can have their contents extracted with the usual bunch of socket buffer manipulation functions. A data message can be determined to be the last one in a sequence with rxrpc_kernel_is_data_last(). 
When a - data message has been used up, rxrpc_kernel_data_delivered() should be - called on it.. + data message has been used up, rxrpc_kernel_data_consumed() should be + called on it. - Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose - of. It is possible to get extra refs on all types of message for later - freeing, but this may pin the state of a call until the message is finally - freed. + Messages should be handled to rxrpc_kernel_free_skb() to dispose of. It + is possible to get extra refs on all types of message for later freeing, + but this may pin the state of a call until the message is finally freed. (*) Accept an incoming call. @@ -821,12 +820,14 @@ The kernel interface functions are as follows: Other errors may be returned if the call had been aborted (-ECONNABORTED) or had timed out (-ETIME). - (*) Record the delivery of a data message and free it. + (*) Record the delivery of a data message. - void rxrpc_kernel_data_delivered(struct sk_buff *skb); + void rxrpc_kernel_data_consumed(struct rxrpc_call *call, + struct sk_buff *skb); - This is used to record a data message as having been delivered and to - update the ACK state for the call. The socket buffer will be freed. + This is used to record a data message as having been consumed and to + update the ACK state for the call. The message must still be passed to + rxrpc_kernel_free_skb() for disposal by the caller. (*) Free a message. diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 4b0eff6..85737e9 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -189,11 +189,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, case 1: _debug("extract FID count"); ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); @@ -210,11 +207,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, _debug("extract FID array"); ret = afs_extract_data(call, skb, last, call->buffer, call->count * 3 * 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; _debug("unmarshall FID array"); call->request = kcalloc(call->count, @@ -239,11 +233,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, case 3: _debug("extract CB count"); ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); @@ -258,11 +249,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, _debug("extract CB array"); ret = afs_extract_data(call, skb, last, call->request, call->count * 3 * 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; _debug("unmarshall CB array"); cb = call->request; @@ -278,9 +266,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, call->unmarshall++; case 5: - _debug("trailer"); - if (skb->len != 0) - return -EBADMSG; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; /* Record that the message was unmarshalled successfully so * that the call destructor can know do the callback breaking @@ -294,8 +282,6 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff 
*skb, break; } - if (!last) - return 0; call->state = AFS_CALL_REPLYING; @@ -335,13 +321,13 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call, { struct afs_server *server; struct in_addr addr; + int ret; _enter(",{%u},%d", skb->len, last); - if (skb->len > 0) - return -EBADMSG; - if (!last) - return 0; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; @@ -371,8 +357,10 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call, _enter(",{%u},%d", skb->len, last); + /* There are some arguments that we ignore */ + afs_data_consumed(call, skb); if (!last) - return 0; + return -EAGAIN; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; @@ -408,12 +396,13 @@ static void SRXAFSCB_Probe(struct work_struct *work) static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, bool last) { + int ret; + _enter(",{%u},%d", skb->len, last); - if (skb->len > 0) - return -EBADMSG; - if (!last) - return 0; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; @@ -460,10 +449,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - if (skb->len > 0) - return -EBADMSG; - if (!last) - return 0; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; switch (call->unmarshall) { case 0: @@ -509,8 +497,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, break; } - if (!last) - return 0; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; call->state = AFS_CALL_REPLYING; @@ -588,12 +577,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, struct sk_buff *skb, bool last) { + int ret; + _enter(",{%u},%d", skb->len, last); - if (skb->len > 0) - return -EBADMSG; - if (!last) - return 0; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index c2e930e..9312b92 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -240,15 +240,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call, { struct afs_vnode *vnode = call->reply; const __be32 *bp; + int ret; _enter(",,%u", last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -335,11 +333,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, case 1: _debug("extract data length (MSW)"); ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->count = ntohl(call->tmp); _debug("DATA length MSW: %u", call->count); @@ -353,11 +348,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, case 2: _debug("extract data length"); ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->count = ntohl(call->tmp); _debug("DATA length: %u", call->count); @@ -375,11 +367,8 @@ 
static int afs_deliver_fs_fetch_data(struct afs_call *call, ret = afs_extract_data(call, skb, last, buffer, call->count); kunmap_atomic(buffer); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; } call->offset = 0; @@ -389,11 +378,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, case 4: ret = afs_extract_data(call, skb, last, call->buffer, (21 + 3 + 6) * 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); @@ -405,15 +391,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, call->unmarshall++; case 5: - _debug("trailer"); - if (skb->len != 0) - return -EBADMSG; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; break; } - if (!last) - return 0; - if (call->count < PAGE_SIZE) { _debug("clear"); page = call->reply3; @@ -537,9 +520,8 @@ static int afs_deliver_fs_give_up_callbacks(struct afs_call *call, { _enter(",{%u},%d", skb->len, last); - if (skb->len > 0) - return -EBADMSG; /* shouldn't be any reply data */ - return 0; + /* shouldn't be any reply data */ + return afs_data_complete(call, skb, last); } /* @@ -622,15 +604,13 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call, { struct afs_vnode *vnode = call->reply; const __be32 *bp; + int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -721,15 +701,13 @@ static int afs_deliver_fs_remove(struct afs_call *call, { struct afs_vnode *vnode = call->reply; const __be32 *bp; + int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -804,15 +782,13 @@ static int afs_deliver_fs_link(struct afs_call *call, { struct afs_vnode *dvnode = call->reply, *vnode = call->reply2; const __be32 *bp; + int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -892,15 +868,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call, { struct afs_vnode *vnode = call->reply; const __be32 *bp; + int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -999,15 +973,13 @@ static int afs_deliver_fs_rename(struct afs_call *call, { struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2; const __be32 *bp; + int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != 
call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -1105,20 +1077,13 @@ static int afs_deliver_fs_store_data(struct afs_call *call, { struct afs_vnode *vnode = call->reply; const __be32 *bp; + int ret; _enter(",,%u", last); - afs_transfer_reply(call, skb); - if (!last) { - _leave(" = 0 [more]"); - return 0; - } - - if (call->reply_size != call->reply_max) { - _leave(" = -EBADMSG [%u != %u]", - call->reply_size, call->reply_max); - return -EBADMSG; - } + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; @@ -1292,20 +1257,13 @@ static int afs_deliver_fs_store_status(struct afs_call *call, afs_dataversion_t *store_version; struct afs_vnode *vnode = call->reply; const __be32 *bp; + int ret; _enter(",,%u", last); - afs_transfer_reply(call, skb); - if (!last) { - _leave(" = 0 [more]"); - return 0; - } - - if (call->reply_size != call->reply_max) { - _leave(" = -EBADMSG [%u != %u]", - call->reply_size, call->reply_max); - return -EBADMSG; - } + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ store_version = NULL; @@ -1504,11 +1462,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, _debug("extract status"); ret = afs_extract_data(call, skb, last, call->buffer, 12 * 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; bp = call->buffer; xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); @@ -1518,11 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, /* extract the volume name length */ case 2: ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->count = ntohl(call->tmp); _debug("volname length: %u", call->count); @@ -1537,11 +1489,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, if (call->count > 0) { ret = afs_extract_data(call, skb, last, call->reply3, call->count); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; } p = call->reply3; @@ -1561,11 +1510,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, case 4: ret = afs_extract_data(call, skb, last, call->buffer, call->count); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->offset = 0; call->unmarshall++; @@ -1574,11 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, /* extract the offline message length */ case 5: ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->count = ntohl(call->tmp); _debug("offline msg length: %u", call->count); @@ -1593,11 +1536,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, if (call->count > 0) { ret = afs_extract_data(call, skb, last, call->reply3, call->count); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; } p = call->reply3; @@ -1617,11 +1557,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, case 7: ret = 
afs_extract_data(call, skb, last, call->buffer, call->count); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->offset = 0; call->unmarshall++; @@ -1630,11 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, /* extract the message of the day length */ case 8: ret = afs_extract_data(call, skb, last, &call->tmp, 4); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->count = ntohl(call->tmp); _debug("motd length: %u", call->count); @@ -1649,11 +1583,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, if (call->count > 0) { ret = afs_extract_data(call, skb, last, call->reply3, call->count); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; } p = call->reply3; @@ -1673,26 +1604,20 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, case 10: ret = afs_extract_data(call, skb, last, call->buffer, call->count); - switch (ret) { - case 0: break; - case -EAGAIN: return 0; - default: return ret; - } + if (ret < 0) + return ret; call->offset = 0; call->unmarshall++; no_motd_padding: case 11: - _debug("trailer %d", skb->len); - if (skb->len != 0) - return -EBADMSG; + ret = afs_data_complete(call, skb, last); + if (ret < 0) + return ret; break; } - if (!last) - return 0; - _leave(" = 0 [done]"); return 0; } @@ -1764,15 +1689,13 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call, struct sk_buff *skb, bool last) { const __be32 *bp; + int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 71d5982..df976b2 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -609,17 +609,29 @@ extern void afs_proc_cell_remove(struct afs_cell *); */ extern int afs_open_socket(void); extern void afs_close_socket(void); +extern void afs_data_consumed(struct afs_call *, struct sk_buff *); extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, const struct afs_wait_mode *); extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, size_t, size_t); extern void afs_flat_call_destructor(struct afs_call *); -extern void afs_transfer_reply(struct afs_call *, struct sk_buff *); +extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool); extern void afs_send_empty_reply(struct afs_call *); extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *, size_t); +static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb, + bool last) +{ + if (skb->len > 0) + return -EBADMSG; + afs_data_consumed(call, skb); + if (!last) + return -EAGAIN; + return 0; +} + /* * security.c */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 4832de84..14d04c8 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -150,10 +150,9 @@ void afs_close_socket(void) } /* - * note that the data in a socket buffer is now delivered and that the buffer - * should be freed + * Note that the data in a socket buffer is now consumed. 
*/ -static void afs_data_delivered(struct sk_buff *skb) +void afs_data_consumed(struct afs_call *call, struct sk_buff *skb) { if (!skb) { _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs)); @@ -161,9 +160,7 @@ static void afs_data_delivered(struct sk_buff *skb) } else { _debug("DLVR %p{%u} [%d]", skb, skb->mark, atomic_read(&afs_outstanding_skbs)); - if (atomic_dec_return(&afs_outstanding_skbs) == -1) - BUG(); - rxrpc_kernel_data_delivered(skb); + rxrpc_kernel_data_consumed(call->rxcall, skb); } } @@ -489,9 +486,15 @@ static void afs_deliver_to_call(struct afs_call *call) last = rxrpc_kernel_is_data_last(skb); ret = call->type->deliver(call, skb, last); switch (ret) { + case -EAGAIN: + if (last) { + _debug("short data"); + goto unmarshal_error; + } + break; case 0: - if (last && - call->state == AFS_CALL_AWAIT_REPLY) + ASSERT(last); + if (call->state == AFS_CALL_AWAIT_REPLY) call->state = AFS_CALL_COMPLETE; break; case -ENOTCONN: @@ -501,6 +504,7 @@ static void afs_deliver_to_call(struct afs_call *call) abort_code = RX_INVALID_OPERATION; goto do_abort; default: + unmarshal_error: abort_code = RXGEN_CC_UNMARSHAL; if (call->state != AFS_CALL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; @@ -511,9 +515,7 @@ static void afs_deliver_to_call(struct afs_call *call) call->state = AFS_CALL_ERROR; break; } - afs_data_delivered(skb); - skb = NULL; - continue; + break; case RXRPC_SKB_MARK_FINAL_ACK: _debug("Rcv ACK"); call->state = AFS_CALL_COMPLETE; @@ -685,15 +687,35 @@ static void afs_process_async_call(struct afs_call *call) } /* - * empty a socket buffer into a flat reply buffer + * Empty a socket buffer into a flat reply buffer. */ -void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) +int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last) { size_t len = skb->len; - if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0) - BUG(); - call->reply_size += len; + if (len > call->reply_max - call->reply_size) { + _leave(" = -EBADMSG [%zu > %u]", + len, call->reply_max - call->reply_size); + return -EBADMSG; + } + + if (len > 0) { + if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, + len) < 0) + BUG(); + call->reply_size += len; + } + + afs_data_consumed(call, skb); + if (!last) + return -EAGAIN; + + if (call->reply_size != call->reply_max) { + _leave(" = -EBADMSG [%u != %u]", + call->reply_size, call->reply_max); + return -EBADMSG; + } + return 0; } /* @@ -745,7 +767,8 @@ static void afs_collect_incoming_call(struct work_struct *work) } /* - * grab the operation ID from an incoming cache manager call + * Grab the operation ID from an incoming cache manager call. The socket + * buffer is discarded on error or if we don't yet have sufficient data. */ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, bool last) @@ -766,12 +789,9 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, call->offset += len; if (call->offset < 4) { - if (last) { - _leave(" = -EBADMSG [op ID short]"); - return -EBADMSG; - } - _leave(" = 0 [incomplete]"); - return 0; + afs_data_consumed(call, skb); + _leave(" = -EAGAIN"); + return -EAGAIN; } call->state = AFS_CALL_AWAIT_REQUEST; @@ -855,7 +875,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) } /* - * extract a piece of data from the received data socket buffers + * Extract a piece of data from the received data socket buffers. 
*/ int afs_extract_data(struct afs_call *call, struct sk_buff *skb, bool last, void *buf, size_t count) @@ -873,10 +893,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb, call->offset += len; if (call->offset < count) { - if (last) { - _leave(" = -EBADMSG [%d < %zu]", call->offset, count); - return -EBADMSG; - } + afs_data_consumed(call, skb); _leave(" = -EAGAIN"); return -EAGAIN; } diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 340afd0..f94d1ab 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -64,16 +64,13 @@ static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call, struct afs_cache_vlocation *entry; __be32 *bp; u32 tmp; - int loop; + int loop, ret; _enter(",,%u", last); - afs_transfer_reply(call, skb); - if (!last) - return 0; - - if (call->reply_size != call->reply_max) - return -EBADMSG; + ret = afs_transfer_reply(call, skb, last); + if (ret < 0) + return ret; /* unmarshall the reply once we've received all of it */ entry = call->reply; diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index ac1bc3c..7b0f886 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -40,12 +40,12 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, unsigned long, gfp_t); int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t); +void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); void rxrpc_kernel_end_call(struct rxrpc_call *); bool rxrpc_kernel_is_data_last(struct sk_buff *); u32 rxrpc_kernel_get_abort_code(struct sk_buff *); int rxrpc_kernel_get_error_number(struct sk_buff *); -void rxrpc_kernel_data_delivered(struct sk_buff *); void rxrpc_kernel_free_skb(struct sk_buff *); struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long); int rxrpc_kernel_reject_call(struct socket *); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 1bb9e7a..ff83fb1 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -425,6 +425,7 @@ struct rxrpc_call { spinlock_t lock; rwlock_t state_lock; /* lock for state transition */ atomic_t usage; + atomic_t skb_count; /* Outstanding packets on this call */ atomic_t sequence; /* Tx data packet sequence counter */ u32 local_abort; /* local abort code */ u32 remote_abort; /* remote abort code */ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 0b28321..9bae21e 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, call->state = RXRPC_CALL_SERVER_ACCEPTING; list_add_tail(&call->accept_link, &rx->acceptq); rxrpc_get_call(call); + atomic_inc(&call->skb_count); nsp = rxrpc_skb(notification); nsp->call = call; diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index fc32aa5..f5e9916 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call, ASSERTCMP(sp->call, ==, NULL); sp->call = call; rxrpc_get_call(call); + atomic_inc(&call->skb_count); /* insert into the buffer in sequence order */ spin_lock_bh(&call->lock); @@ -734,6 +735,7 @@ all_acked: skb->mark = RXRPC_SKB_MARK_FINAL_ACK; sp->call = call; rxrpc_get_call(call); + atomic_inc(&call->skb_count); spin_lock_bh(&call->lock); if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0) BUG(); @@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error, sp->error = error; sp->call = 
call; rxrpc_get_call(call); + atomic_inc(&call->skb_count); spin_lock_bh(&call->lock); ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 91287c9..c47f14f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -491,13 +491,6 @@ void rxrpc_release_call(struct rxrpc_call *call) spin_lock_bh(&call->lock); while ((skb = skb_dequeue(&call->rx_queue)) || (skb = skb_dequeue(&call->rx_oos_queue))) { - sp = rxrpc_skb(skb); - if (sp->call) { - ASSERTCMP(sp->call, ==, call); - rxrpc_put_call(call); - sp->call = NULL; - } - skb->destructor = NULL; spin_unlock_bh(&call->lock); _debug("- zap %s %%%u #%u", @@ -605,6 +598,7 @@ void __rxrpc_put_call(struct rxrpc_call *call) if (atomic_dec_and_test(&call->usage)) { _debug("call %d dead", call->debug_id); + WARN_ON(atomic_read(&call->skb_count) != 0); ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); rxrpc_queue_work(&call->destroyer); } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 991a20d..9e0f58e 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -55,9 +55,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { _debug("already terminated"); ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE); - skb->destructor = NULL; - sp->call = NULL; - rxrpc_put_call(call); rxrpc_free_skb(skb); return 0; } @@ -111,13 +108,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, ret = 0; out: - /* release the socket buffer */ - if (skb) { - skb->destructor = NULL; - sp->call = NULL; - rxrpc_put_call(call); - rxrpc_free_skb(skb); - } + rxrpc_free_skb(skb); _leave(" = %d", ret); return ret; @@ -200,6 +191,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, sp->call = call; rxrpc_get_call(call); + atomic_inc(&call->skb_count); terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index a3fa2ed..9ed66d5 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -203,6 +203,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } /* we transferred the whole data packet */ + if (!(flags & MSG_PEEK)) + rxrpc_kernel_data_consumed(call, skb); + if (sp->hdr.flags & RXRPC_LAST_PACKET) { _debug("last"); if (rxrpc_conn_is_client(call->conn)) { @@ -360,28 +363,6 @@ wait_error: } /** - * rxrpc_kernel_data_delivered - Record delivery of data message - * @skb: Message holding data - * - * Record the delivery of a data message. This permits RxRPC to keep its - * tracking correct. The socket buffer will be deleted. 
- */ -void rxrpc_kernel_data_delivered(struct sk_buff *skb) -{ - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - struct rxrpc_call *call = sp->call; - - ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv); - ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1); - call->rx_data_recv = sp->hdr.seq; - - ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten); - rxrpc_free_skb(skb); -} - -EXPORT_SYMBOL(rxrpc_kernel_data_delivered); - -/** * rxrpc_kernel_is_data_last - Determine if data message is last one * @skb: Message holding data * diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index eee0cfd9..06c51d4 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -98,11 +98,39 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call, spin_unlock_bh(&call->lock); } +/** + * rxrpc_kernel_data_consumed - Record consumption of data message + * @call: The call to which the message pertains. + * @skb: Message holding data + * + * Record the consumption of a data message and generate an ACK if appropriate. + * The call state is shifted if this was the final packet. The caller must be + * in process context with no spinlocks held. + * + * TODO: Actually generate the ACK here rather than punting this to the + * workqueue. + */ +void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq); + + ASSERTCMP(sp->call, ==, call); + ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA); + + /* TODO: Fix the sequence number tracking */ + ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv); + ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1); + ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten); + + call->rx_data_recv = sp->hdr.seq; + rxrpc_hard_ACK_data(call, sp); +} +EXPORT_SYMBOL(rxrpc_kernel_data_consumed); + /* - * destroy a packet that has an RxRPC control buffer - * - advance the hard-ACK state of the parent call (done here in case something - * in the kernel bypasses recvmsg() and steals the packet directly off of the - * socket receive queue) + * Destroy a packet that has an RxRPC control buffer */ void rxrpc_packet_destructor(struct sk_buff *skb) { @@ -112,9 +140,8 @@ void rxrpc_packet_destructor(struct sk_buff *skb) _enter("%p{%p}", skb, call); if (call) { - /* send the final ACK on a client call */ - if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) - rxrpc_hard_ACK_data(call, sp); + if (atomic_dec_return(&call->skb_count) < 0) + BUG(); rxrpc_put_call(call); sp->call = NULL; } -- cgit v0.10.2 From 94d9f1c5906b20053efe375b6d66610bca4b8b64 Mon Sep 17 00:00:00 2001 From: David Forster Date: Wed, 3 Aug 2016 15:13:01 +0100 Subject: ipv4: panic in leaf_walk_rcu due to stale node pointer Panic occurs when issuing "cat /proc/net/route" whilst populating FIB with > 1M routes. Use of cached node pointer in fib_route_get_idx is unsafe. 
BUG: unable to handle kernel paging request at ffffc90001630024 IP: [] leaf_walk_rcu+0x10/0xe0 PGD 11b08d067 PUD 11b08e067 PMD dac4b067 PTE 0 Oops: 0000 [#1] SMP Modules linked in: nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscac snd_hda_codec_generic snd_hda_intel snd_hda_codec snd_hda_core snd_hwdep virti acpi_cpufreq button parport_pc ppdev lp parport autofs4 ext4 crc16 mbcache jbd tio_ring virtio floppy uhci_hcd ehci_hcd usbcore usb_common libata scsi_mod CPU: 1 PID: 785 Comm: cat Not tainted 4.2.0-rc8+ #4 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007 task: ffff8800da1c0bc0 ti: ffff88011a05c000 task.ti: ffff88011a05c000 RIP: 0010:[] [] leaf_walk_rcu+0x10/0xe0 RSP: 0018:ffff88011a05fda0 EFLAGS: 00010202 RAX: ffff8800d8a40c00 RBX: ffff8800da4af940 RCX: ffff88011a05ff20 RDX: ffffc90001630020 RSI: 0000000001013531 RDI: ffff8800da4af950 RBP: 0000000000000000 R08: ffff8800da1f9a00 R09: 0000000000000000 R10: ffff8800db45b7e4 R11: 0000000000000246 R12: ffff8800da4af950 R13: ffff8800d97a74c0 R14: 0000000000000000 R15: ffff8800d97a7480 FS: 00007fd3970e0700(0000) GS:ffff88011fd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: ffffc90001630024 CR3: 000000011a7e4000 CR4: 00000000000006e0 Stack: ffffffff814d00d3 0000000000000000 ffff88011a05ff20 ffff8800da1f9a00 ffffffff811dd8b9 0000000000000800 0000000000020000 00007fd396f35000 ffffffff811f8714 0000000000003431 ffffffff8138dce0 0000000000000f80 Call Trace: [] ? fib_route_seq_start+0x93/0xc0 [] ? seq_read+0x149/0x380 [] ? fsnotify+0x3b4/0x500 [] ? process_echoes+0x70/0x70 [] ? proc_reg_read+0x47/0x70 [] ? __vfs_read+0x23/0xd0 [] ? rw_verify_area+0x52/0xf0 [] ? vfs_read+0x81/0x120 [] ? SyS_read+0x42/0xa0 [] ? entry_SYSCALL_64_fastpath+0x16/0x75 Code: 48 85 c0 75 d8 f3 c3 31 c0 c3 f3 c3 66 66 66 66 66 66 2e 0f 1f 84 00 00 a 04 89 f0 33 02 44 89 c9 48 d3 e8 0f b6 4a 05 49 89 RIP [] leaf_walk_rcu+0x10/0xe0 RSP CR2: ffffc90001630024 Signed-off-by: Dave Forster Acked-by: Alexander Duyck Signed-off-by: David S. Miller diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index d07fc07..febca0f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2452,9 +2452,7 @@ struct fib_route_iter { static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos) { - struct fib_table *tb = iter->main_tb; struct key_vector *l, **tp = &iter->tnode; - struct trie *t; t_key key; /* use cache location of next-to-find key */ @@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, pos -= iter->pos; key = iter->key; } else { - t = (struct trie *)tb->tb_data; - iter->tnode = t->kv; iter->pos = 0; key = 0; } @@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) return NULL; iter->main_tb = tb; + t = (struct trie *)tb->tb_data; + iter->tnode = t->kv; if (*pos != 0) return fib_route_get_idx(iter, *pos); - t = (struct trie *)tb->tb_data; - iter->tnode = t->kv; iter->pos = 0; iter->key = 0; -- cgit v0.10.2 From 5e3b724e2767fb6495df1dcccaf7c79585c78ae9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 3 Aug 2016 21:42:18 +0200 Subject: net: dsa: b53: Add missing ULL suffix for 64-bit constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On 32-bit (e.g. 
with m68k-linux-gnu-gcc-4.1): drivers/net/dsa/b53/b53_common.c: In function ‘b53_arl_read’: drivers/net/dsa/b53/b53_common.c:1072: warning: integer constant is too large for ‘long’ type Fixes: 1da6df85c6fbed8f ("net: dsa: b53: Implement ARL add/del/dump operations") Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. Miller diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8f12bdd..a0b453e 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -258,7 +258,7 @@ * BCM5325 and BCM5365 share most definitions below */ #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) -#define ARLTBL_MAC_MASK 0xffffffffffff +#define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 #define ARLTBL_VID_MASK_25 0xff #define ARLTBL_VID_MASK 0xfff -- cgit v0.10.2 From 8e85946777d3f667085f298c0acfec12fe105078 Mon Sep 17 00:00:00 2001 From: Akshay Adiga Date: Thu, 4 Aug 2016 20:59:17 +0530 Subject: cpufreq: powernv: Fix crash in gpstate_timer_handler() Commit 09ca4c9b5958 (cpufreq: powernv: Replacing pstate_id with frequency table index) changes calc_global_pstate() to use cpufreq_table index instead of pstate_id. But in gpstate_timer_handler(), pstate_id was being passed instead of cpufreq_table index, which caused index_to_pstate() to access out of bound indices, leading to this crash. Adding sanity check for index and pstate, to ensure only valid pstate and index values are returned. Call Trace: [c00000078d66b130] [c00000000011d224] __free_irq+0x234/0x360 (unreliable) [c00000078d66b1c0] [c00000000011d44c] free_irq+0x6c/0xa0 [c00000078d66b1f0] [c00000000006c4f8] opal_event_shutdown+0x88/0xd0 [c00000078d66b230] [c000000000067a4c] opal_shutdown+0x1c/0x90 [c00000078d66b260] [c000000000063a00] pnv_shutdown+0x20/0x40 [c00000078d66b280] [c000000000021538] machine_restart+0x38/0x90 [c0000000078d66b310] [c000000000965ea0] panic+0x284/0x300 [c00000078d66b3a0] [c00000000001f508] die+0x388/0x450 [c00000078d66b430] [c000000000045a50] bad_page_fault+0xd0/0x140 [c00000078d66b4a0] [c000000000008964] handle_page_fault+0x2c/0x30 interrupt: 300 at gpstate_timer_handler+0x150/0x260 LR = gpstate_timer_handler+0x130/0x260 [c00000078d66b7f0] [c000000000132b58] call_timer_fn+0x58/0x1c0 [c00000078d66b880] [c000000000132e20] expire_timers+0x130/0x1d0 [c00000078d66b8f0] [c000000000133068] run_timer_softirq+0x1a8/0x230 [c00000078d66b980] [c0000000000b535c] __do_softirq+0x18c/0x400 [c00000078d66ba70] [c0000000000b5828] irq_exit+0xc8/0x100 [c00000078d66ba90] [c00000000001e214] timer_interrupt+0xa4/0xe0 [c00000078d66bac0] [c0000000000027d0] decrementer_common+0x150/0x180 interrupt: 901 at arch_local_irq_restore+0x74/0x90 0] [c000000000106b34] call_cpuidle+0x44/0x90 [c00000078d66be50] [c00000000010708c] cpu_startup_entry+0x38c/0x460 [c00000078d66bf20] [c00000000003d930] start_secondary+0x330/0x380 [c00000078d66bf90] [c000000000008e6c] start_secondary_prolog+0x10/0x14 Fixes: 09ca4c9b5958 (cpufreq: powernv: Replacing pstate_id with frequency table index) Reported-by: Madhavan Srinivasan Signed-off-by: Akshay Adiga Acked-by: Viresh Kumar Tested-by: Andrew Donnellan Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 0f138b0..2fcc879 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -145,11 +145,30 @@ static struct powernv_pstate_info { /* Use following macros for conversions between pstate_id and index */ static inline int idx_to_pstate(unsigned int i) { + if (unlikely(i >= powernv_pstate_info.nr_pstates)) { + pr_warn_once("index %u is out of bound\n", i); + return powernv_freqs[powernv_pstate_info.nominal].driver_data; + } + return powernv_freqs[i].driver_data; } static inline unsigned int pstate_to_idx(int pstate) { + int min = powernv_freqs[powernv_pstate_info.min].driver_data; + int max = powernv_freqs[powernv_pstate_info.max].driver_data; + + if (min > 0) { + if (unlikely((pstate < max) || (pstate > min))) { + pr_warn_once("pstate %d is out of bound\n", pstate); + return powernv_pstate_info.nominal; + } + } else { + if (unlikely((pstate > max) || (pstate < min))) { + pr_warn_once("pstate %d is out of bound\n", pstate); + return powernv_pstate_info.nominal; + } + } /* * abs() is deliberately used so that is works with * both monotonically increasing and decreasing @@ -594,7 +613,7 @@ void gpstate_timer_handler(unsigned long data) } else { gpstate_idx = calc_global_pstate(gpstates->elapsed_time, gpstates->highest_lpstate_idx, - freq_data.pstate_id); + gpstates->last_lpstate_idx); } /* -- cgit v0.10.2 From a6ed3ea65d9868fdf9eff84e6fe4f666b8d14b02 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 5 Aug 2016 14:01:27 -0700 Subject: bpf: restore behavior of bpf_map_update_elem The introduction of pre-allocated hash elements inadvertently broke the behavior of bpf hash maps where users expected to call bpf_map_update_elem() without considering that the map can be full. Some programs do: old_value = bpf_map_lookup_elem(map, key); if (old_value) { ... prepare new_value on stack ... bpf_map_update_elem(map, key, new_value); } Before pre-alloc the update() for existing element would work even in 'map full' condition. Restore this behavior. The above program could have updated old_value in place instead of update() which would be faster and most programs use that approach, but sometimes the values are large and the programs use update() helper to do atomic replacement of the element. Note we cannot simply update element's value in-place like percpu hash map does and have to allocate extra num_possible_cpu elements and use this extra reserve when the map is full. Fixes: 6c9059817432 ("bpf: pre-allocate hash map elements") Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index fff3650..570eeca 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -26,11 +26,18 @@ struct bpf_htab { struct bucket *buckets; void *elems; struct pcpu_freelist freelist; + void __percpu *extra_elems; atomic_t count; /* number of elements in this hashtable */ u32 n_buckets; /* number of hash buckets */ u32 elem_size; /* size of each element in bytes */ }; +enum extra_elem_state { + HTAB_NOT_AN_EXTRA_ELEM = 0, + HTAB_EXTRA_ELEM_FREE, + HTAB_EXTRA_ELEM_USED +}; + /* each htab element is struct htab_elem + key + value */ struct htab_elem { union { @@ -38,7 +45,10 @@ struct htab_elem { struct bpf_htab *htab; struct pcpu_freelist_node fnode; }; - struct rcu_head rcu; + union { + struct rcu_head rcu; + enum extra_elem_state state; + }; u32 hash; char key[0] __aligned(8); }; @@ -113,6 +123,23 @@ free_elems: return err; } +static int alloc_extra_elems(struct bpf_htab *htab) +{ + void __percpu *pptr; + int cpu; + + pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); + if (!pptr) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = + HTAB_EXTRA_ELEM_FREE; + } + htab->extra_elems = pptr; + return 0; +} + /* Called from syscall */ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) { @@ -185,6 +212,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) if (percpu) cost += (u64) round_up(htab->map.value_size, 8) * num_possible_cpus() * htab->map.max_entries; + else + cost += (u64) htab->elem_size * num_possible_cpus(); if (cost >= U32_MAX - PAGE_SIZE) /* make sure page count doesn't overflow */ @@ -212,14 +241,22 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) raw_spin_lock_init(&htab->buckets[i].lock); } + if (!percpu) { + err = alloc_extra_elems(htab); + if (err) + goto free_buckets; + } + if (!(attr->map_flags & BPF_F_NO_PREALLOC)) { err = prealloc_elems_and_freelist(htab); if (err) - goto free_buckets; + goto free_extra_elems; } return &htab->map; +free_extra_elems: + free_percpu(htab->extra_elems); free_buckets: kvfree(htab->buckets); free_htab: @@ -349,7 +386,6 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); kfree(l); - } static void htab_elem_free_rcu(struct rcu_head *head) @@ -370,6 +406,11 @@ static void htab_elem_free_rcu(struct rcu_head *head) static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) { + if (l->state == HTAB_EXTRA_ELEM_USED) { + l->state = HTAB_EXTRA_ELEM_FREE; + return; + } + if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { pcpu_freelist_push(&htab->freelist, &l->fnode); } else { @@ -381,25 +422,44 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, void *value, u32 key_size, u32 hash, - bool percpu, bool onallcpus) + bool percpu, bool onallcpus, + bool old_elem_exists) { u32 size = htab->map.value_size; bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); struct htab_elem *l_new; void __percpu *pptr; + int err = 0; if (prealloc) { l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); if (!l_new) - return ERR_PTR(-E2BIG); + err = -E2BIG; } else { if (atomic_inc_return(&htab->count) > htab->map.max_entries) { atomic_dec(&htab->count); - return ERR_PTR(-E2BIG); + err = -E2BIG; + } else { + l_new = kmalloc(htab->elem_size, + 
GFP_ATOMIC | __GFP_NOWARN); + if (!l_new) + return ERR_PTR(-ENOMEM); } - l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); - if (!l_new) - return ERR_PTR(-ENOMEM); + } + + if (err) { + if (!old_elem_exists) + return ERR_PTR(err); + + /* if we're updating the existing element and the hash table + * is full, use per-cpu extra elems + */ + l_new = this_cpu_ptr(htab->extra_elems); + if (l_new->state != HTAB_EXTRA_ELEM_FREE) + return ERR_PTR(-E2BIG); + l_new->state = HTAB_EXTRA_ELEM_USED; + } else { + l_new->state = HTAB_NOT_AN_EXTRA_ELEM; } memcpy(l_new->key, key, key_size); @@ -489,7 +549,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, if (ret) goto err; - l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false); + l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, + !!l_old); if (IS_ERR(l_new)) { /* all pre-allocated elements are in use or memory exhausted */ ret = PTR_ERR(l_new); @@ -563,7 +624,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, } } else { l_new = alloc_htab_elem(htab, key, value, key_size, - hash, true, onallcpus); + hash, true, onallcpus, false); if (IS_ERR(l_new)) { ret = PTR_ERR(l_new); goto err; @@ -652,6 +713,7 @@ static void htab_map_free(struct bpf_map *map) htab_free_elems(htab); pcpu_freelist_destroy(&htab->freelist); } + free_percpu(htab->extra_elems); kvfree(htab->buckets); kfree(htab); } -- cgit v0.10.2 From ba0cc3c153590e3d31985b8f8914d205a20b0d7a Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 5 Aug 2016 14:01:28 -0700 Subject: samples/bpf: add bpf_map_update_elem() tests increase test coverage to check previously missing 'update when full' Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c index 47bf085..cce2b59 100644 --- a/samples/bpf/test_maps.c +++ b/samples/bpf/test_maps.c @@ -68,7 +68,16 @@ static void test_hashmap_sanity(int i, void *data) assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && errno == E2BIG); + /* update existing element, thought the map is full */ + key = 1; + assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0); + key = 2; + assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); + key = 1; + assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); + /* check that key = 0 doesn't exist */ + key = 0; assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); /* iterate over two elements */ @@ -413,10 +422,12 @@ static void do_work(int fn, void *data) for (i = fn; i < MAP_SIZE; i += TASKS) { key = value = i; - if (do_update) + if (do_update) { assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); - else + assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0); + } else { assert(bpf_delete_elem(map_fd, &key) == 0); + } } } -- cgit v0.10.2 From c518189567eaf42b2ec50a4d982484c8e38799f8 Mon Sep 17 00:00:00 2001 From: Harini Katakam Date: Fri, 5 Aug 2016 10:31:58 +0530 Subject: net: macb: Correct CAPS mask USRIO and JUMBO CAPS have the same mask. Fix the same. Fixes: ce721a702197 ("net: ethernet: cadence-macb: Add disabled usrio caps") Cc: stable@vger.kernel.org # v4.5+ Signed-off-by: Harini Katakam Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 36893d8..b6fcf10 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -403,11 +403,11 @@ #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 +#define MACB_CAPS_JUMBO 0x00000020 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 -#define MACB_CAPS_JUMBO 0x00000010 /* Bit manipulation macros */ #define MACB_BIT(name) \ -- cgit v0.10.2 From 0b98027122d0715aa2dc39882e4c295112275785 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Fri, 29 Jul 2016 14:26:53 +1000 Subject: m68knommu: fix user a5 register being overwritten On no-MMU systems the application a5 register can be overwitten with the address of the process data segment when processing application signals. For flat format applications compiled with full absolute relocation this effectively corrupts the a5 register on signal processing - and this very quickly leads to process crash and often takes out the whole system with a panic as well. This has no effect on flat format applications compiled with the more common PIC methods (such as -msep-data). These format applications reserve a5 for the pointer to the data segment anyway - so it doesn't change it. A long time ago the a5 register was used in the code packed into the user stack to enable signal return processing. And so it had to be restored on end of signal cleanup processing back to the original a5 user value. This was historically done by saving away a5 in the sigcontext structure. At some point (a long time back it seems) the a5 restore process was changed and it was hard coded to put the user data segment address directly into a5. Which is ok for the common PIC compiled application case, but breaks the full relocation application code. We no longer use this type of signal handling mechanism and so we don't need to do anything special to save and restore a5 at all now. So remove the code that hard codes a5 to the address of the user data segment. Signed-off-by: Greg Ungerer diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 2dcee3a..9202f82 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -213,7 +213,6 @@ static inline int frame_extra_sizes(int f) static inline void adjustformat(struct pt_regs *regs) { - ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data; /* * set format byte to make stack appear modulo 4, which it will * be when doing the rte -- cgit v0.10.2 From 176b1ec21306f0775e6d7a03f42d82a0b144ef0e Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Thu, 23 Jun 2016 15:02:46 -0700 Subject: thermal: intel_pch_thermal: Add suspend/resume callback Added suspend/resume callback to disable/enable PCH thermal sensor respectively. If the sensor is enabled by the BIOS, then the sensor status will not be changed during suspend/resume. 
Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index 6a6ec1c..9b4815e 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -21,6 +21,7 @@ #include #include #include +#include /* Intel PCH thermal Device IDs */ #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ @@ -65,6 +66,7 @@ struct pch_thermal_device { unsigned long crt_temp; int hot_trip_id; unsigned long hot_temp; + bool bios_enabled; }; static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) @@ -75,8 +77,10 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) *nr_trips = 0; /* Check if BIOS has already enabled thermal sensor */ - if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) + if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) { + ptd->bios_enabled = true; goto read_trips; + } tsel = readb(ptd->hw_base + WPT_TSEL); /* @@ -130,9 +134,39 @@ static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp) return 0; } +static int pch_wpt_suspend(struct pch_thermal_device *ptd) +{ + u8 tsel; + + if (ptd->bios_enabled) + return 0; + + tsel = readb(ptd->hw_base + WPT_TSEL); + + writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL); + + return 0; +} + +static int pch_wpt_resume(struct pch_thermal_device *ptd) +{ + u8 tsel; + + if (ptd->bios_enabled) + return 0; + + tsel = readb(ptd->hw_base + WPT_TSEL); + + writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL); + + return 0; +} + struct pch_dev_ops { int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips); int (*get_temp)(struct pch_thermal_device *ptd, int *temp); + int (*suspend)(struct pch_thermal_device *ptd); + int (*resume)(struct pch_thermal_device *ptd); }; @@ -140,6 +174,8 @@ struct pch_dev_ops { static const struct pch_dev_ops pch_dev_ops_wpt = { .hw_init = pch_wpt_init, .get_temp = pch_wpt_get_temp, + .suspend = pch_wpt_suspend, + .resume = pch_wpt_resume, }; static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp) @@ -269,6 +305,22 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static int intel_pch_thermal_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct pch_thermal_device *ptd = pci_get_drvdata(pdev); + + return ptd->ops->suspend(ptd); +} + +static int intel_pch_thermal_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct pch_thermal_device *ptd = pci_get_drvdata(pdev); + + return ptd->ops->resume(ptd); +} + static struct pci_device_id intel_pch_thermal_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, @@ -276,11 +328,17 @@ static struct pci_device_id intel_pch_thermal_id[] = { }; MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); +static const struct dev_pm_ops intel_pch_pm_ops = { + .suspend = intel_pch_thermal_suspend, + .resume = intel_pch_thermal_resume, +}; + static struct pci_driver intel_pch_thermal_driver = { .name = "intel_pch_thermal", .id_table = intel_pch_thermal_id, .probe = intel_pch_thermal_probe, .remove = intel_pch_thermal_remove, + .driver.pm = &intel_pch_pm_ops, }; module_pci_driver(intel_pch_thermal_driver); -- cgit v0.10.2 From 70c50ee72eebb3d1e3aae7450269fd8d8074c610 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Fri, 5 Aug 2016 15:20:41 +0200 Subject: thermal/powerclamp: Prevent division by zero when counting interval I have got a zero division error when disabling 
the forced idle injection from the intel powerclamp. I did echo 0 >/sys/class/thermal/cooling_device48/cur_state and got [ 986.072632] divide error: 0000 [#1] PREEMPT SMP [ 986.078989] Modules linked in: [ 986.083618] CPU: 17 PID: 24967 Comm: kidle_inject/17 Not tainted 4.7.0-1-default+ #3055 [ 986.093781] Hardware name: Intel Corporation S2600CP/S2600CP, BIOS RMLSDP.86I.R3.27.D685.1305151734 05/15/2013 [ 986.106227] task: ffff880430e1c080 task.stack: ffff880427ef0000 [ 986.114122] RIP: 0010:[] [] clamp_thread+0x1d9/0x600 [ 986.124609] RSP: 0018:ffff880427ef3e20 EFLAGS: 00010246 [ 986.131860] RAX: 0000000000000258 RBX: 0000000000000006 RCX: 0000000000000001 [ 986.141179] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000018 [ 986.150478] RBP: ffff880427ef3ec8 R08: ffff880427ef0000 R09: 0000000000000002 [ 986.159779] R10: 0000000000003df2 R11: 0000000000000018 R12: 0000000000000002 [ 986.169089] R13: 0000000000000000 R14: ffff880427ef0000 R15: ffff880427ef0000 [ 986.178388] FS: 0000000000000000(0000) GS:ffff880435940000(0000) knlGS:0000000000000000 [ 986.188785] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 986.196559] CR2: 00007f1d0caf0000 CR3: 0000000002006000 CR4: 00000000001406e0 [ 986.205909] Stack: [ 986.209524] ffff8802be897b00 ffff880430e1c080 0000000000000011 0000006a35959780 [ 986.219236] 0000000000000011 ffff880427ef0008 0000000000000000 ffff8804359503d0 [ 986.228966] 0000000100029d93 ffffffff81794140 0000000000000000 ffffffff05000011 [ 986.238686] Call Trace: [ 986.242825] [] ? pkg_state_counter+0x80/0x80 [ 986.250866] [] ? powerclamp_set_cur_state+0x180/0x180 [ 986.259797] [] kthread+0xc9/0xe0 [ 986.266682] [] ret_from_fork+0x1f/0x40 [ 986.274142] [] ? kthread_create_on_node+0x180/0x180 [ 986.282869] Code: d1 ea 48 89 d6 80 3d 6a d0 d4 00 00 ba 64 00 00 00 89 d8 41 0f 45 f5 0f af c2 42 8d 14 2e be 31 00 00 00 83 fa 31 0f 42 f2 31 d2 f6 48 8b 15 9e 07 87 00 48 8b 3d 97 07 87 00 48 63 f0 83 e8 [ 986.307806] RIP [] clamp_thread+0x1d9/0x600 [ 986.315871] RSP RIP points to the following lines: compensation = get_compensation(target_ratio); interval = duration_jiffies*100/(target_ratio+compensation); A solution would be to switch the following two commands in powerclamp_set_cur_state(): set_target_ratio = 0; end_power_clamp(); But I think that the zero division might happen also when target_ratio is non-zero because the compensation might be negative. Therefore we also check the sum of target_ratio and compensation explicitly. Also the compensated_ratio variable is always set. Therefore there is no need to initialize it. Signed-off-by: Petr Mladek Acked-by: Jacob Pan Signed-off-by: Zhang Rui diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 015ce2e..0e4dc0a 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -388,7 +388,7 @@ static int clamp_thread(void *arg) int sleeptime; unsigned long target_jiffies; unsigned int guard; - unsigned int compensation = 0; + unsigned int compensated_ratio; int interval; /* jiffies to sleep for each attempt */ unsigned int duration_jiffies = msecs_to_jiffies(duration); unsigned int window_size_now; @@ -409,8 +409,11 @@ static int clamp_thread(void *arg) * c-states, thus we need to compensate the injected idle ratio * to achieve the actual target reported by the HW. 
*/ - compensation = get_compensation(target_ratio); - interval = duration_jiffies*100/(target_ratio+compensation); + compensated_ratio = target_ratio + + get_compensation(target_ratio); + if (compensated_ratio <= 0) + compensated_ratio = 1; + interval = duration_jiffies * 100 / compensated_ratio; /* align idle time */ target_jiffies = roundup(jiffies, interval); @@ -647,8 +650,8 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev, goto exit_set; } else if (set_target_ratio > 0 && new_target_ratio == 0) { pr_info("Stop forced idle injection\n"); - set_target_ratio = 0; end_power_clamp(); + set_target_ratio = 0; } else /* adjust currently running */ { set_target_ratio = new_target_ratio; /* make new set_target_ratio visible to other cpus */ -- cgit v0.10.2 From d0b7306d203c82e7c04d6eb066ca4898f016ebdd Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Thu, 2 Jun 2016 15:25:31 +0100 Subject: thermal: fix race condition when updating cooling device When multiple thermal zones are bound to the same cooling device, multiple kernel threads may want to update the cooling device state by calling thermal_cdev_update(). Having cdev not protected by a mutex can lead to a race condition. Consider the following situation with two kernel threads k1 and k2: Thread k1 Thread k2 || || call thermal_cdev_update() || ... || set_cur_state(cdev, target); call power_actor_set_power() || ... || instance->target = state; || cdev->updated = false; || || cdev->updated = true; || // completes execution call thermal_cdev_update() || // cdev->updated == true || return; || \/ time k2 has already looped through the thermal instances looking for the deepest cooling device state and is preempted right before setting cdev->updated to true. Now, k1 runs, modifies the thermal instance state and sets cdev->updated to false. Then, k1 is preempted and k2 continues the execution by setting cdev->updated to true, therefore preventing k1 from performing the update. Notice that this is not an issue if k2 looks at the instance->target modified by k1 "after" it is assigned by k1. In fact, in this case the update will happen anyway and k1 can safely return immediately from thermal_cdev_update(). This may lead to a situation where a thermal governor never updates the cooling device. For example, this is the case for the step_wise governor: when calling the function thermal_zone_trip_update(), the governor may always get a new state equal to the old one (which, however, wasn't notified to the cooling device) and will therefore skip the update. 
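To make the interleaving above easier to follow, here is a stand-alone, heavily simplified C sketch of the invariant the patch establishes. This is illustrative userspace code, not the kernel code: the names cdev, set_target() and cdev_update() are hypothetical stand-ins, and the real patch also keeps the per-instance scan that is omitted here. The point is only that clearing and consuming the updated flag now happen inside the same critical section as the state they guard:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for a cooling device. */
struct cdev {
	pthread_mutex_t lock;
	bool updated;           /* true once "hardware" matches target */
	unsigned long target;   /* deepest requested cooling state */
	unsigned long hw_state; /* last state written to "hardware" */
};

static struct cdev cd = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Governor side (k1): request a new state and mark the device stale. */
static void set_target(struct cdev *cdev, unsigned long state)
{
	pthread_mutex_lock(&cdev->lock);
	cdev->target = state;
	cdev->updated = false;          /* cdev needs an update */
	pthread_mutex_unlock(&cdev->lock);
}

/* Update side (k2): push target to hardware exactly once per change. */
static void cdev_update(struct cdev *cdev)
{
	unsigned long state;

	pthread_mutex_lock(&cdev->lock);
	if (cdev->updated) {            /* nothing changed since last write */
		pthread_mutex_unlock(&cdev->lock);
		return;
	}
	cdev->hw_state = cdev->target;  /* stands in for set_cur_state() */
	cdev->updated = true;
	state = cdev->hw_state;
	pthread_mutex_unlock(&cdev->lock);
	printf("hw_state now %lu\n", state);
}

int main(void)
{
	set_target(&cd, 3);     /* k1 picks a new state */
	cdev_update(&cd);       /* writes 3 to "hardware" */
	cdev_update(&cd);       /* no-op: updated is still true */
	return 0;
}

With this ordering the losing interleaving in the changelog cannot happen: k1's clearing of updated and k2's setting of it back to true are serialized by the same mutex as the write of the target state, so a cleared flag is never overwritten before the corresponding state reaches the hardware.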
CC: Zhang Rui CC: Eduardo Valentin CC: Peter Feuerer Reported-by: Toby Huang Signed-off-by: Michele Di Giorgio Reviewed-by: Javi Merino Signed-off-by: Zhang Rui diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 34fe365..68bd1b5 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -116,7 +116,9 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) instance->target = get_target_state(tz, cdev, percentage, cur_trip_level); + mutex_lock(&instance->cdev->lock); instance->cdev->updated = false; + mutex_unlock(&instance->cdev->lock); thermal_cdev_update(cdev); } return 0; diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index fc52016..bb118a1 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -71,7 +71,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) dev_dbg(&instance->cdev->device, "target=%d\n", (int)instance->target); + mutex_lock(&instance->cdev->lock); instance->cdev->updated = false; /* cdev needs update */ + mutex_unlock(&instance->cdev->lock); } mutex_unlock(&tz->lock); diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 2f1a863..b4d3116 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -529,7 +529,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz) continue; instance->target = 0; + mutex_lock(&instance->cdev->lock); instance->cdev->updated = false; + mutex_unlock(&instance->cdev->lock); thermal_cdev_update(instance->cdev); } } diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index ea9366a..bcef2e7 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -175,7 +175,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) update_passive_instance(tz, trip_type, -1); instance->initialized = true; + mutex_lock(&instance->cdev->lock); instance->cdev->updated = false; /* cdev needs update */ + mutex_unlock(&instance->cdev->lock); } mutex_unlock(&tz->lock); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 5133cd1..e2fc616 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1093,7 +1093,9 @@ int power_actor_set_power(struct thermal_cooling_device *cdev, return ret; instance->target = state; + mutex_lock(&cdev->lock); cdev->updated = false; + mutex_unlock(&cdev->lock); thermal_cdev_update(cdev); return 0; @@ -1623,11 +1625,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) struct thermal_instance *instance; unsigned long target = 0; + mutex_lock(&cdev->lock); /* cooling device is updated*/ - if (cdev->updated) + if (cdev->updated) { + mutex_unlock(&cdev->lock); return; + } - mutex_lock(&cdev->lock); /* Make sure cdev enters the deepest cooling state */ list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { dev_dbg(&cdev->device, "zone%d->target=%lu\n", @@ -1637,9 +1641,9 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) if (instance->target > target) target = instance->target; } - mutex_unlock(&cdev->lock); cdev->ops->set_cur_state(cdev, target); cdev->updated = true; + mutex_unlock(&cdev->lock); trace_cdev_update(cdev, target); dev_dbg(&cdev->device, "set to state %lu\n", target); } -- cgit v0.10.2 From f4c592439b144ef1de66eac764140643a54099eb Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 4 Jul 2016 07:19:32 +0000 Subject: thermal: 
hwmon: EXPORT_SYMBOL_GPL for thermal hwmon sysfs thermal_add_hwmon_sysfs()/thermal_remove_hwmon_sysfs() need EXPORT_SYMBOL_GPL(). Otherwise we will have ERROR >> ERROR: "thermal_remove_hwmon_sysfs" [drivers/thermal/rcar_thermal.ko] undefined! >> ERROR: "thermal_add_hwmon_sysfs" [drivers/thermal/rcar_thermal.ko] undefined! Signed-off-by: Kuninori Morimoto Signed-off-by: Zhang Rui diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index 06fd2ed9..c41c774 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -232,6 +232,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) return result; } +EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs); void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { @@ -270,3 +271,4 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) hwmon_device_unregister(hwmon->device); kfree(hwmon); } +EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs); -- cgit v0.10.2 From 165989a5b667b90589f21c8affe496ad21f08591 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 25 Jul 2016 07:01:19 +0000 Subject: thermal: clock_cooling: Fix missing mutex_init() The driver allocates the mutex but not initialize it. Use mutex_init() on it to initialize it correctly. This is detected by Coccinelle semantic patch. Signed-off-by: Wei Yongjun Signed-off-by: Zhang Rui diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c index 1b4ff0f..ed5dd0e 100644 --- a/drivers/thermal/clock_cooling.c +++ b/drivers/thermal/clock_cooling.c @@ -426,6 +426,7 @@ clock_cooling_register(struct device *dev, const char *clock_name) if (!ccdev) return ERR_PTR(-ENOMEM); + mutex_init(&ccdev->lock); ccdev->dev = dev; ccdev->clk = devm_clk_get(dev, clock_name); if (IS_ERR(ccdev->clk)) -- cgit v0.10.2 From c5b48fa7e298b9a8968a1c1fc0ef013069ca2dd2 Mon Sep 17 00:00:00 2001 From: Lukasz Odzioba Date: Sat, 23 Jul 2016 01:44:49 +0200 Subject: EDAC, sb_edac: Fix channel reporting on Knights Landing On Intel Xeon Phi Knights Landing processor family the channels of the memory controller have untypical arrangement - MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2. This causes the EDAC driver to report the channel name incorrectly. We missed this change earlier, so the code already contains similar comment, but the translation function is incorrect. Without this patch: errors in DIMM_A and DIMM_D were reported in DIMM_D errors in DIMM_B and DIMM_E were reported in DIMM_E errors in DIMM_C and DIMM_F were reported in DIMM_F Correct this. Hubert Chrzaniuk: - rebased to 4.8 - comments and code cleanup Fixes: d0cdf9003140 ("sb_edac: Add Knights Landing (Xeon Phi gen 2) support") Reviewed-by: Tony Luck Cc: Mauro Carvalho Chehab Cc: Hubert Chrzaniuk Cc: linux-edac Cc: lukasz.anaczkowski@intel.com Cc: lukasz.odzioba@intel.com Cc: mchehab@kernel.org Cc: # v4.5.. Link: http://lkml.kernel.org/r/1469231089-22837-1-git-send-email-lukasz.odzioba@intel.com Signed-off-by: Lukasz Odzioba [ Boris: Simplify a bit by removing char mc. ] Signed-off-by: Borislav Petkov diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 4fb2eb7..ce0067b 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = { /* Knight's Landing Support */ /* * KNL's memory channels are swizzled between memory controllers. 
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2 + * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2 */ -#define knl_channel_remap(channel) ((channel + 3) % 6) +#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3) /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 @@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg) mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); - return knl_channel_remap(mc*3 + chan); + return knl_channel_remap(mc, chan); } /* @@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, } else { char A = *("A"); - channel = knl_channel_remap(channel); + /* + * Reported channel is in range 0-2, so we can't map it + * back to mc. To figure out mc we check machine check + * bank register that reported this error. + * bank15 means mc0 and bank16 means mc1. + */ + channel = knl_channel_remap(m->bank == 16, channel); channel_mask = 1 << channel; + snprintf(msg, sizeof(msg), "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", overflow ? " OVERFLOW" : "", -- cgit v0.10.2 From 29986cc8a66fcc4b8ea8a01216a72f06b865360b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 3 Aug 2016 23:31:36 +0300 Subject: drm: rcar-du: Link HDMI encoder with bridge The conversion of the rcar-du driver from the I2C slave encoder to the DRM bridge API left the HDMI encoder's bridge pointer NULL, preventing the bridge from being handled automatically by the DRM core. Fix it. Fixes: 1d926114d8f4 ("drm: rcar-du: Remove i2c slave encoder interface for hdmi encoder") Signed-off-by: Laurent Pinchart Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 4de3ff0..e03004f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -125,6 +125,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, /* Link drm_bridge to encoder */ bridge->encoder = encoder; + encoder->bridge = bridge; ret = drm_bridge_attach(rcdu->ddev, bridge); if (ret) { -- cgit v0.10.2 From 5c6c201ccbaf9d3901f829441d457293f7ca8ef4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Aug 2016 18:35:48 +0200 Subject: drm: Paper over locking inversion after registration rework drm_connector_register_all requires a few too many locks because our connector_list locking is busted. Add another FIXME+hack to work around this. This should address the below lockdep splat: ====================================================== [ INFO: possible circular locking dependency detected ] 4.7.0-rc5+ #524 Tainted: G O ------------------------------------------------------- kworker/u8:0/6 is trying to acquire lock: (&dev->mode_config.mutex){+.+.+.}, at: [] drm_modeset_lock_all+0x40/0x120 but task is already holding lock: ((fb_notifier_list).rwsem){++++.+}, at: [] __blocking_notifier_call_chain+0x35/0x70 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 ((fb_notifier_list).rwsem){++++.+}: [] lock_acquire+0xb1/0x200 [] down_write+0x44/0x80 [] blocking_notifier_chain_register+0x21/0xb0 [] fb_register_client+0x18/0x20 [] backlight_device_register+0x136/0x260 [] intel_backlight_device_register+0xa2/0x160 [i915] [] intel_connector_register+0xe/0x10 [i915] [] intel_dp_connector_register+0x1b/0x80 [i915] [] drm_connector_register+0x4a/0x80 [] drm_connector_register_all+0x64/0xf0 [] drm_modeset_register_all+0x174/0x1c0 [] drm_dev_register+0xc2/0xd0 [] i915_driver_load+0x1547/0x2200 [i915] [] i915_pci_probe+0x4f/0x70 [i915] [] local_pci_probe+0x45/0xa0 [] pci_device_probe+0xdb/0x130 [] driver_probe_device+0x223/0x440 [] __driver_attach+0xd5/0x100 [] bus_for_each_dev+0x66/0xa0 [] driver_attach+0x1e/0x20 [] bus_add_driver+0x1ee/0x280 [] driver_register+0x60/0xe0 [] __pci_register_driver+0x60/0x70 [] i915_init+0x5b/0x62 [i915] [] do_one_initcall+0x3d/0x150 [] do_init_module+0x5f/0x1d9 [] load_module+0x20e6/0x27e0 [] SYSC_finit_module+0xc3/0xf0 [] SyS_finit_module+0xe/0x10 [] entry_SYSCALL_64_fastpath+0x1c/0xac -> #0 (&dev->mode_config.mutex){+.+.+.}: [] __lock_acquire+0x10fc/0x1260 [] lock_acquire+0xb1/0x200 [] mutex_lock_nested+0x67/0x3c0 [] drm_modeset_lock_all+0x40/0x120 [] drm_fb_helper_restore_fbdev_mode_unlocked+0x2b/0x80 [] drm_fb_helper_set_par+0x2d/0x50 [] intel_fbdev_set_par+0x1a/0x60 [i915] [] fbcon_init+0x586/0x610 [] visual_init+0xca/0x130 [] do_bind_con_driver+0x1c1/0x3a0 [] do_take_over_console+0x116/0x180 [] do_fbcon_takeover+0x57/0xb0 [] fbcon_event_notify+0x658/0x750 [] notifier_call_chain+0x3e/0xb0 [] __blocking_notifier_call_chain+0x4d/0x70 [] blocking_notifier_call_chain+0x16/0x20 [] fb_notifier_call_chain+0x1b/0x20 [] register_framebuffer+0x251/0x330 [] drm_fb_helper_initial_config+0x25f/0x3f0 [] intel_fbdev_initial_config+0x18/0x30 [i915] [] async_run_entry_fn+0x48/0x150 [] process_one_work+0x1e7/0x750 [] worker_thread+0x4b/0x4f0 [] kthread+0xef/0x110 [] ret_from_fork+0x1f/0x40 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock((fb_notifier_list).rwsem); lock(&dev->mode_config.mutex); lock((fb_notifier_list).rwsem); lock(&dev->mode_config.mutex); *** DEADLOCK *** 6 locks held by kworker/u8:0/6: #0: ("events_unbound"){.+.+.+}, at: [] process_one_work+0x169/0x750 #1: ((&entry->work)){+.+.+.}, at: [] process_one_work+0x169/0x750 #2: (registration_lock){+.+.+.}, at: [] register_framebuffer+0x27/0x330 #3: (console_lock){+.+.+.}, at: [] register_framebuffer+0x26e/0x330 #4: (&fb_info->lock){+.+.+.}, at: [] lock_fb_info+0x1d/0x40 #5: ((fb_notifier_list).rwsem){++++.+}, at: [] __blocking_notifier_call_chain+0x35/0x70 stack backtrace: CPU: 2 PID: 6 Comm: kworker/u8:0 Tainted: G O 4.7.0-rc5+ #524 Hardware name: Intel Corp. Broxton P/NOTEBOOK, BIOS APLKRVPA.X64.0138.B33.1606250842 06/25/2016 Workqueue: events_unbound async_run_entry_fn 0000000000000000 ffff8800758577f0 ffffffff814507a5 ffffffff828b9900 ffffffff828b9900 ffff880075857830 ffffffff810dc6fa ffff880075857880 ffff88007584d688 0000000000000005 0000000000000006 ffff88007584d6b0 Call Trace: [] dump_stack+0x67/0x92 [] print_circular_bug+0x1aa/0x200 [] __lock_acquire+0x10fc/0x1260 [] lock_acquire+0xb1/0x200 [] ? drm_modeset_lock_all+0x40/0x120 [] ? drm_modeset_lock_all+0x40/0x120 [] mutex_lock_nested+0x67/0x3c0 [] ? drm_modeset_lock_all+0x40/0x120 [] ? rcu_read_lock_sched_held+0x7f/0x90 [] ? kmem_cache_alloc_trace+0x248/0x2b0 [] ? 
drm_modeset_lock_all+0x25/0x120 [] drm_modeset_lock_all+0x40/0x120 [] drm_fb_helper_restore_fbdev_mode_unlocked+0x2b/0x80 [] drm_fb_helper_set_par+0x2d/0x50 [] intel_fbdev_set_par+0x1a/0x60 [i915] [] fbcon_init+0x586/0x610 [] visual_init+0xca/0x130 [] do_bind_con_driver+0x1c1/0x3a0 [] do_take_over_console+0x116/0x180 [] do_fbcon_takeover+0x57/0xb0 [] fbcon_event_notify+0x658/0x750 [] notifier_call_chain+0x3e/0xb0 [] __blocking_notifier_call_chain+0x4d/0x70 [] blocking_notifier_call_chain+0x16/0x20 [] fb_notifier_call_chain+0x1b/0x20 [] register_framebuffer+0x251/0x330 [] ? vga_switcheroo_client_fb_set+0x5d/0x70 [] drm_fb_helper_initial_config+0x25f/0x3f0 [] intel_fbdev_initial_config+0x18/0x30 [i915] [] async_run_entry_fn+0x48/0x150 [] process_one_work+0x1e7/0x750 [] ? process_one_work+0x169/0x750 [] worker_thread+0x4b/0x4f0 [] ? process_one_work+0x750/0x750 [] kthread+0xef/0x110 [] ret_from_fork+0x1f/0x40 [] ? kthread_stop+0x2e0/0x2e0 v2: Rebase onto the right branch (hand-editing patches ftw) and add more reporters. Reported-by: Imre Deak Cc: Imre Deak Cc: Chris Wilson Acked-by: Chris Wilson Reported-by: Jiri Kosina Cc: Jiri Kosina Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f1d9f05..b1dbb60 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1121,16 +1121,14 @@ static int drm_connector_register_all(struct drm_device *dev) struct drm_connector *connector; int ret; - mutex_lock(&dev->mode_config.mutex); - - drm_for_each_connector(connector, dev) { + /* FIXME: taking the mode config mutex ends up in a clash with + * fbcon/backlight registration */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ret = drm_connector_register(connector); if (ret) goto err; } - mutex_unlock(&dev->mode_config.mutex); - return 0; err: -- cgit v0.10.2 From 416f37d0816b9720b8227953e55954d81456f991 Mon Sep 17 00:00:00 2001 From: Darren Stevens Date: Wed, 27 Jul 2016 15:41:29 +0100 Subject: powerpc/pasemi: Fix coherent_dma_mask for dma engine Commit 817820b0226a ("powerpc/iommu: Support "hybrid" iommu/direct DMA ops for coherent_mask < dma_mask) adds a check of coherent_dma_mask for dma allocations. Unfortunately current PASemi code does not set this value for the DMA engine, which ends up with the default value of 0xffffffff, the result is on a PASemi system with >2Gb ram and iommu enabled the the onboard ethernet stops working due to an inability to allocate memory. Add an initialisation to pci_dma_dev_setup_pasemi(). 
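As a side note on the constant chosen above (an illustration, not part of the patch): DMA_BIT_MASK(n) builds an n-bit all-ones mask, so the stale default of 0xffffffff describes only the first 4 GiB of DMA address space, while a 44-bit mask covers 16 TiB. A trivial userspace check of the arithmetic, using the same expansion as the kernel macro:

#include <stdio.h>

/* Mirrors the kernel's DMA_BIT_MASK() from include/linux/dma-mapping.h. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("32-bit mask: %#llx (%llu GiB)\n",
	       DMA_BIT_MASK(32), (DMA_BIT_MASK(32) + 1) >> 30);
	printf("44-bit mask: %#llx (%llu GiB)\n",
	       DMA_BIT_MASK(44), (DMA_BIT_MASK(44) + 1) >> 30);
	return 0;
}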
Signed-off-by: Darren Stevens Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 309d9cc..c61667e 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) if (dev->vendor == 0x1959 && dev->device == 0xa007 && !firmware_has_feature(FW_FEATURE_LPAR)) { dev->dev.archdata.dma_ops = &dma_direct_ops; + /* + * Set the coherent DMA mask to prevent the iommu + * being used unnecessarily + */ + dev->dev.coherent_dma_mask = DMA_BIT_MASK(44); return; } #endif -- cgit v0.10.2 From 707e6835f89419ff1c05e828c2d9939fd95a7ad8 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sat, 23 Jul 2016 22:16:56 +0800 Subject: netfilter: nf_ct_h323: do not re-activate already expired timer Commit 96d1327ac2e3 ("netfilter: h323: Use mod_timer instead of set_expect_timeout") just simplify the source codes if (!del_timer(&exp->timeout)) return 0; add_timer(&exp->timeout); to mod_timer(&exp->timeout, jiffies + info->timeout * HZ); This is not correct, and introduce a race codition: CPU0 CPU1 - timer expire process_rcf expectation_timed_out lock(exp_lock) - find_exp waiting exp_lock... re-activate timer!! waiting exp_lock... unlock(exp_lock) lock(exp_lock) - unlink expect - free(expect) - unlock(exp_lock) So when the timer expires again, we will access the memory that was already freed. Replace mod_timer with mod_timer_pending here to fix this problem. Fixes: 96d1327ac2e3 ("netfilter: h323: Use mod_timer instead of set_expect_timeout") Cc: Gao Feng Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index bb77a97..5c0db5c 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, "timeout to %u seconds for", info->timeout); nf_ct_dump_tuple(&exp->tuple); - mod_timer(&exp->timeout, jiffies + info->timeout * HZ); + mod_timer_pending(&exp->timeout, + jiffies + info->timeout * HZ); } spin_unlock_bh(&nf_conntrack_expect_lock); } -- cgit v0.10.2 From 2c86943c20e375b0fe562af0626f2e5461d8d203 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 24 Jul 2016 19:53:19 +0200 Subject: netfilter: nf_tables: s/MFT_REG32_01/NFT_REG32_01 MFT_REG32_01 is a typo, rename this to NFT_REG32_01. Signed-off-by: Pablo Neira Ayuso diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 01751fa..c674ba2 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -24,7 +24,7 @@ enum nft_registers { __NFT_REG_MAX, NFT_REG32_00 = 8, - MFT_REG32_01, + NFT_REG32_01, NFT_REG32_02, NFT_REG32_03, NFT_REG32_04, -- cgit v0.10.2 From c1eda3c6394f805886b2afa8c7ea5e04305ec698 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 1 Aug 2016 13:13:08 +0200 Subject: netfilter: nft_rbtree: ignore inactive matching element with no descendants If we find a matching element that is inactive with no descendants, we jump to the found label, then crash because of nul-dereference on the left branch. Fix this by checking that the element is active and not an interval end and skipping the logic that only applies to the tree iteration. Signed-off-by: Pablo Neira Ayuso Tested-by: Anders K. 
Pedersen diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index 6473936..ffe9ae0 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c @@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, } else if (d > 0) parent = parent->rb_right; else { -found: if (!nft_set_elem_active(&rbe->ext, genmask)) { parent = parent->rb_left; continue; @@ -84,9 +83,12 @@ found: } } - if (set->flags & NFT_SET_INTERVAL && interval != NULL) { - rbe = interval; - goto found; + if (set->flags & NFT_SET_INTERVAL && interval != NULL && + nft_set_elem_active(&interval->ext, genmask) && + !nft_rbtree_interval_end(interval)) { + spin_unlock_bh(&nft_rbtree_lock); + *ext = &interval->ext; + return true; } out: spin_unlock_bh(&nft_rbtree_lock); -- cgit v0.10.2 From cf1b18030de29e4e5b0a57695ae5db4a89da0ff7 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sun, 24 Jul 2016 13:53:30 +0200 Subject: USB: serial: option: add D-Link DWM-156/A3 The device has four interfaces; the three serial ports ought to be handled by this driver: 00 Diagnostic interface serial port 01 NMEA device serial port 02 Mass storage (sd card) 03 Modem serial port The other product ids listed in the Windows driver are present already. Signed-off-by: Lubomir Rintel Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8e07536..0338851 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1966,6 +1966,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, -- cgit v0.10.2 From 0d35d0815b19216f852f257afe420f7c7d182780 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 3 Aug 2016 12:41:40 +0200 Subject: netfilter: nf_conntrack_sip: CSeq 0 is a valid CSeq Do not drop packet when CSeq is 0 as 0 is also a valid value for CSeq. simple_strtoul() will return 0 either when all digits are 0 or if there are no digits at all. Therefore when simple_strtoul() returns 0 we check if first character is digit 0 or not. 
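As a side note, the ambiguity can be reproduced in plain C. The following is only a sketch, using the standard strtoul() in place of the kernel's simple_strtoul() and a hypothetical parse_cseq() helper: both a genuine "CSeq: 0" and a field with no digits at all return 0, so only the first-character check tells them apart.

  #include <stdio.h>
  #include <stdlib.h>

  /* Sketch only: standard strtoul() shows the same ambiguity as
   * simple_strtoul() -- it returns 0 both for "0" and for "no digits". */
  static int parse_cseq(const char *field)
  {
          unsigned long cseq = strtoul(field, NULL, 10);

          if (!cseq && field[0] != '0')
                  return -1;      /* no usable CSeq, reject */
          return (int)cseq;       /* accept, including a genuine 0 */
  }

  int main(void)
  {
          printf("%d\n", parse_cseq("0"));    /* prints 0, accepted */
          printf("%d\n", parse_cseq("abc"));  /* prints -1, rejected */
          return 0;
  }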
Signed-off-by: Christophe Leroy Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 8d9db9d..7d77217 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff, return NF_DROP; } cseq = simple_strtoul(*dptr + matchoff, NULL, 10); - if (!cseq) { + if (!cseq && *(*dptr + matchoff) != '0') { nf_ct_helper_log(skb, ct, "cannot get cseq"); return NF_DROP; } @@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, return NF_DROP; } cseq = simple_strtoul(*dptr + matchoff, NULL, 10); - if (!cseq) { + if (!cseq && *(*dptr + matchoff) != '0') { nf_ct_helper_log(skb, ct, "cannot get cseq"); return NF_DROP; } -- cgit v0.10.2 From 6977495c06f7f47636a076ee5a0ca571279d9697 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Deli=C3=ABn?= Date: Thu, 28 Jul 2016 18:52:55 +0000 Subject: USB: serial: ftdi_sio: add PIDs for Ivium Technologies devices Ivium Technologies uses the FTDI VID with custom PIDs for their line of electrochemical interfaces and the PalmSens they developed for PalmSens BV. Signed-off-by: Robert Delien Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 0082080..8962cdc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c5d6c1e..067e3a6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -406,6 +406,12 @@ #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 /* + * Ivium Technologies product IDs + */ +#define FTDI_PALMSENS_PID 0xf440 +#define FTDI_IVIUM_XSTAT_PID 0xf441 + +/* * Linx Technologies product ids */ #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ -- cgit v0.10.2 From ae34d12cc1e212ffcd92e069030e54dae69c832f Mon Sep 17 00:00:00 2001 From: "Sheng-Hui J. Chu" Date: Thu, 28 Jul 2016 17:01:45 -0400 Subject: USB: serial: ftdi_sio: add device ID for WICED USB UART dev board BCM20706V2_EVAL is a WICED dev board designed with FT2232H USB 2.0 UART/FIFO IC. To support BCM920706V2_EVAL dev board for WICED development on Linux. Add the VID(0a5c) and PID(6422) to ftdi_sio driver to allow loading ftdi_sio for this board. Signed-off-by: Sheng-Hui J. 
Chu Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8962cdc..b2d767e 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1010,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 067e3a6..f87a938 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -679,6 +679,12 @@ #define INTREPID_NEOVI_PID 0x0701 /* + * WICED USB UART + */ +#define WICED_VID 0x0A5C +#define WICED_USB20706V2_PID 0x6422 + +/* * Definitions for ID TECH (www.idt-net.com) devices */ #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ -- cgit v0.10.2 From 01d7956b58e644ea0d2e8d9340c5727a8fc39d70 Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Tue, 2 Aug 2016 11:29:25 +0200 Subject: USB: serial: option: add support for Telit LE920A4 This patch adds a set of compositions for Telit LE920A4. Compositions in short are: 0x1207: tty + tty 0x1208: tty + adb + tty + tty 0x1211: tty + adb + ecm 0x1212: tty + adb 0x1213: ecm + tty 0x1214: tty + adb + ecm + tty telit_le922_blacklist_usbcfg3 is reused for compositions 0x1211 and 0x1214 due to the same interfaces positions. Signed-off-by: Daniele Palmas Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 0338851..bc47258 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 +#define TELIT_PRODUCT_LE920A4_1207 0x1207 +#define TELIT_PRODUCT_LE920A4_1208 0x1208 +#define TELIT_PRODUCT_LE920A4_1211 0x1211 +#define TELIT_PRODUCT_LE920A4_1212 0x1212 +#define TELIT_PRODUCT_LE920A4_1213 0x1213 +#define TELIT_PRODUCT_LE920A4_1214 0x1214 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -628,6 +634,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { .reserved = BIT(1) | BIT(5), }; +static const struct option_blacklist_info telit_le920a4_blacklist_1 = { + .sendsetup = BIT(0), + .reserved = BIT(1), +}; + static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { .sendsetup = BIT(2), .reserved = BIT(0) | BIT(1) | BIT(3), @@ -1203,6 +1214,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), + .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), + .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, -- cgit v0.10.2 From 647024a7df36014bbc4479d92d88e6b77c0afcf6 Mon Sep 17 00:00:00 2001 From: Alexey Klimov Date: Mon, 8 Aug 2016 02:34:46 +0100 Subject: USB: serial: fix memleak in driver-registration error path udriver struct allocated by kzalloc() will not be freed if usb_register() and next calls fail. This patch fixes this by adding one more step with kfree(udriver) in error path. Signed-off-by: Alexey Klimov Acked-by: Alan Stern Cc: stable Signed-off-by: Johan Hovold diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b1b9bac..d213cf4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] rc = usb_register(udriver); if (rc) - return rc; + goto failed_usb_register; for (sd = serial_drivers; *sd; ++sd) { (*sd)->usb_driver = udriver; @@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] while (sd-- > serial_drivers) usb_serial_deregister(*sd); usb_deregister(udriver); +failed_usb_register: + kfree(udriver); return rc; } EXPORT_SYMBOL_GPL(usb_serial_register_drivers); -- cgit v0.10.2 From bf47dc572aaf53c3f2311c71e03ec01be3131fd6 Mon Sep 17 00:00:00 2001 From: Sascha Silbe Date: Mon, 4 Jul 2016 17:16:47 +0200 Subject: s390: clarify compressed image code path The way the decompressor is hooked into the start-up code is rather subtle, with a mix of multiply-defined symbols and hardcoded address literals. Add some comments at the junction points to clarify how it works. Signed-off-by: Sascha Silbe Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S index f86a4ee..28c4f96 100644 --- a/arch/s390/boot/compressed/head.S +++ b/arch/s390/boot/compressed/head.S @@ -21,16 +21,21 @@ ENTRY(startup_continue) lg %r15,.Lstack-.LPG1(%r13) aghi %r15,-160 brasl %r14,decompress_kernel - # setup registers for memory mover & branch to target + # Set up registers for memory mover. We move the decompressed image to + # 0x11000, starting at offset 0x11000 in the decompressed image so + # that code living at 0x11000 in the image will end up at 0x11000 in + # memory. lgr %r4,%r2 lg %r2,.Loffset-.LPG1(%r13) la %r4,0(%r2,%r4) lg %r3,.Lmvsize-.LPG1(%r13) lgr %r5,%r3 - # move the memory mover someplace safe + # Move the memory mover someplace safe so it doesn't overwrite itself. la %r1,0x200 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) - # decompress image is started at 0x11000 + # When the memory mover is done we pass control to + # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in + # the decompressed image. lgr %r6,%r2 br %r1 mover: diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 56e4d82..4431905 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -309,7 +309,9 @@ ENTRY(startup_kdump) l %r15,.Lstack-.LPG0(%r13) ahi %r15,-STACK_FRAME_OVERHEAD brasl %r14,verify_facilities - /* Continue with startup code in head64.S */ +# For uncompressed images, continue in +# arch/s390/kernel/head64.S. For compressed images, continue in +# arch/s390/boot/compressed/head.S. 
jg startup_continue .Lstack: -- cgit v0.10.2 From 134a24cd895c5d138d639a0741ad27e389cd4562 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Wed, 3 Aug 2016 17:55:00 +0200 Subject: s390/crc32-vx: Fix checksum calculation for small sizes The current prealign logic will fail for sizes < alignment, as the new datalen passed to the vector function would be smaller than zero. Being a size_t, it gets wrapped to a huge number, causing memory overruns and wrong data. Let's add an early exit if the size is smaller than the minimal size with alignment. This will also avoid calling the software fallback twice for all sizes smaller than the minimum size (prealign + remaining). Signed-off-by: Christian Borntraeger Fixes: f848dbd3bc1a ("s390/crc32-vx: add crypto API module for optimized CRC-32 algorithms") Reviewed-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 577ae1d..2bad9d8 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c @@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size); struct kernel_fpu vxstate; \ unsigned long prealign, aligned, remaining; \ \ + if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \ + return ___crc32_sw(crc, data, datalen); \ + \ if ((unsigned long)data & VX_ALIGN_MASK) { \ prealign = VX_ALIGNMENT - \ ((unsigned long)data & VX_ALIGN_MASK); \ @@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size); data = (void *)((unsigned long)data + prealign); \ } \ \ - if (datalen < VX_MIN_LEN) \ - return ___crc32_sw(crc, data, datalen); \ - \ aligned = datalen & ~VX_ALIGN_MASK; \ remaining = datalen & VX_ALIGN_MASK; \ \ -- cgit v0.10.2 From e2efc424549eb66d227dfe6d073d1789a24bc698 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 4 Aug 2016 10:24:30 +0200 Subject: s390/lib: fix memcmp and strstr If two strings compare equal, the clcle instruction will update the string addresses to point _after_ the strings. This might already be on a different page, so we should not use these pointers to calculate the difference, as in that case the calculation of the difference can cause oopses. The return value of memcmp does not need the difference; we can just reuse the condition code and return -1 for CC=1 (all bytes compared, first operand low) and +1 for CC=2 (all bytes compared, first operand high). strstr also does not need the diff. While fixing this, make the common function clcle "correct on its own" by using l1 instead of l2 for the first length. strstr will call this with l2 for both strings.
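As a portable sketch of the same idea (illustrative C only, not the s390 assembler implementation in the diff that follows): memcmp() only has to return a sign, so the result can be derived without ever dereferencing pointers that may have advanced past the end of the buffers.

  #include <stddef.h>

  /* Illustrative three-way compare: returns -1/0/+1 like the CC-based
   * s390 version, and never reads beyond the first mismatching byte. */
  static int memcmp_sketch(const void *cs, const void *ct, size_t n)
  {
          const unsigned char *a = cs, *b = ct;
          size_t i;

          for (i = 0; i < n; i++) {
                  if (a[i] != b[i])
                          return a[i] < b[i] ? -1 : 1; /* CC=1 / CC=2 */
          }
          return 0;                                    /* CC=0: equal */
  }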
Signed-off-by: Christian Borntraeger Fixes: db7f5eef3dc0 ("s390/lib: use basic blocks for inline assemblies") Reviewed-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index e390bbb..48352bf 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -237,11 +237,10 @@ char * strrchr(const char * s, int c) EXPORT_SYMBOL(strrchr); static inline int clcle(const char *s1, unsigned long l1, - const char *s2, unsigned long l2, - int *diff) + const char *s2, unsigned long l2) { register unsigned long r2 asm("2") = (unsigned long) s1; - register unsigned long r3 asm("3") = (unsigned long) l2; + register unsigned long r3 asm("3") = (unsigned long) l1; register unsigned long r4 asm("4") = (unsigned long) s2; register unsigned long r5 asm("5") = (unsigned long) l2; int cc; @@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1, " srl %0,28" : "=&d" (cc), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5) : : "cc"); - *diff = *(char *)r2 - *(char *)r4; return cc; } @@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2) return (char *) s1; l1 = __strend(s1) - s1; while (l1-- >= l2) { - int cc, dummy; + int cc; - cc = clcle(s1, l1, s2, l2, &dummy); + cc = clcle(s1, l2, s2, l2); if (!cc) return (char *) s1; s1++; @@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr); */ int memcmp(const void *cs, const void *ct, size_t n) { - int ret, diff; + int ret; - ret = clcle(cs, n, ct, n, &diff); + ret = clcle(cs, n, ct, n); if (ret) - ret = diff; + ret = ret == 1 ? -1 : 1; return ret; } EXPORT_SYMBOL(memcmp); -- cgit v0.10.2 From 6e127efeeae5bbbc323e028d96c43dec0cc7b1b8 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 4 Aug 2016 12:51:09 +0200 Subject: s390/config: make the vector optimized crc function builtin For all configs with CONFIG_BTRFS_FS = y we should also make the optimized crc module builtin. Otherwise early mounts will fall back to the software variant. 
Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 889ea34..26e0c7f 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_CRYPTO_CRC32_S390=m +CONFIG_CRYPTO_CRC32_S390=y CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 1bcfd76..24879da 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_CRYPTO_CRC32_S390=m +CONFIG_CRYPTO_CRC32_S390=y CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 13ff090..a5c1e5f 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_CRYPTO_CRC32_S390=m +CONFIG_CRYPTO_CRC32_S390=y CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m diff --git a/arch/s390/defconfig b/arch/s390/defconfig index ccccebe..73610f2 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m -CONFIG_CRYPTO_CRC32_S390=m +CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRC7=m # CONFIG_XZ_DEC_X86 is not set # CONFIG_XZ_DEC_POWERPC is not set -- cgit v0.10.2 From 7de6a63ca2ff21ea67e50a546ca0e9bb6e2b0718 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 28 Jul 2016 20:26:37 +0200 Subject: s390/cio: stop using subchannel_id from ccw_device_private We want to get rid of the copy of struct subchannel_id maintained in ccw_device_private, so obtain it from the subchannel directly. Signed-off-by: Sebastian Ott Reviewed-by: Cornelia Huck Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 15b56a1..9bc3512 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -26,6 +26,7 @@ static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); char dbf_text[15]; if (!scsw_is_valid_cstat(&irb->scsw) || @@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) "received" " ... 
device %04x on subchannel 0.%x.%04x, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->dev_id.devno, cdev->private->schid.ssid, - cdev->private->schid.sch_no, + cdev->private->dev_id.devno, sch->schid.ssid, + sch->schid.sch_no, scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); - sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); + sprintf(dbf_text, "chk%x", sch->schid.sch_no); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } -- cgit v0.10.2 From 9080c92494f0d7e2bca1197bbddcc417117057c3 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 28 Jul 2016 20:30:31 +0200 Subject: s390/qdio: obtain subchannel_id via ccw_device_get_schid() We want to get rid of the copy of struct subchannel_id maintained in ccw_device_private, so obtain it using ccw_device_get_schid(). Signed-off-by: Sebastian Ott Reviewed-by: Cornelia Huck Reviewed-by: Ursula Braun Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4bb5262f..94e9620 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int cstat, dstat; if (!intparm || !irq_ptr) { - DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_ERROR("qint:%4x", schid.sch_no); return; } @@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, int qdio_get_ssqd_desc(struct ccw_device *cdev, struct qdio_ssqd_desc *data) { + struct subchannel_id schid; if (!cdev || !cdev->private) return -EINVAL; - DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); - return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("get ssqd:%4x", schid.sch_no); + return qdio_setup_get_ssqd(NULL, &schid, data); } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); @@ -1154,6 +1158,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) int qdio_shutdown(struct ccw_device *cdev, int how) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int rc; unsigned long flags; @@ -1161,7 +1166,8 @@ int qdio_shutdown(struct ccw_device *cdev, int how) return -ENODEV; WARN_ON_ONCE(irqs_disabled()); - DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qshutdown:%4x", schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); /* @@ -1228,11 +1234,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); int qdio_free(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; if (!irq_ptr) return -ENODEV; - DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qfree:%4x", schid.sch_no); DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); mutex_lock(&irq_ptr->setup_mutex); @@ -1251,9 +1259,11 @@ EXPORT_SYMBOL_GPL(qdio_free); */ int qdio_allocate(struct qdio_initialize *init_data) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; - DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); + ccw_device_get_schid(init_data->cdev, &schid); + DBF_EVENT("qallocate:%4x", schid.sch_no); if ((init_data->no_input_qs && !init_data->input_handler) || (init_data->no_output_qs && !init_data->output_handler)) @@ -1331,12 +1341,14 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) */ int 
qdio_establish(struct qdio_initialize *init_data) { - struct qdio_irq *irq_ptr; struct ccw_device *cdev = init_data->cdev; + struct subchannel_id schid; + struct qdio_irq *irq_ptr; unsigned long saveflags; int rc; - DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qestablish:%4x", schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) @@ -1407,11 +1419,13 @@ EXPORT_SYMBOL_GPL(qdio_establish); */ int qdio_activate(struct ccw_device *cdev) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; int rc; unsigned long saveflags; - DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qactivate:%4x", schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) -- cgit v0.10.2 From 1ab50a99f8cc6f4d761421d55a4044ad7f185cb5 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 29 Jul 2016 10:43:53 +0200 Subject: s390/cio: remove subchannel_id from ccw_device_private A copy of struct subchannel_id is maintained in ccw_device_private. The subchannel id is a property of the subchannel. The additional copy is not needed. Internal users can obtain it from subchannel.schid - device drivers can use ccw_device_get_schid(). Signed-off-by: Sebastian Ott Reviewed-by: Cornelia Huck Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7ada078..6a58bc8 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, priv->state = DEV_STATE_NOT_OPER; priv->dev_id.devno = sch->schib.pmcw.dev; priv->dev_id.ssid = sch->schid.ssid; - priv->schid = sch->schid; INIT_WORK(&priv->todo_work, ccw_device_todo); INIT_LIST_HEAD(&priv->cmb_list); @@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, put_device(&old_sch->dev); /* Initialize new subchannel. */ spin_lock_irq(sch->lock); - cdev->private->schid = sch->schid; cdev->ccwlock = sch->lock; if (!sch_is_pseudo_sch(sch)) sch_set_cdev(sch, cdev); diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8975060..220f491 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -120,7 +120,6 @@ struct ccw_device_private { int state; /* device state */ atomic_t onoff; struct ccw_dev_id dev_id; /* device id */ - struct subchannel_id schid; /* subchannel number */ struct ccw_request req; /* internal I/O request */ int iretry; u8 pgid_valid_mask; /* mask of valid PGIDs */ -- cgit v0.10.2 From a48ed867153c6d2f6d058267213a82dbd8b6737a Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 29 Jul 2016 13:41:20 +0200 Subject: s390/qdio: get rid of spin_lock_irqsave usage All qdio functions that use spin_lock_irqsave are never used from irq context. Thus it is safe to convert all of them to use spin_lock_irq. 
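For readers unfamiliar with the two lock flavours, here is a minimal sketch of the conversion pattern with a hypothetical lock (not qdio code): spin_lock_irqsave() saves and restores the interrupt state and is safe from any context, while spin_lock_irq() may only be used where interrupts are known to be enabled, i.e. process context.

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(example_lock);

  /* Before: callable from any context, carries the saved flags around. */
  static void example_before(void)
  {
          unsigned long flags;

          spin_lock_irqsave(&example_lock, flags);
          /* ... critical section ... */
          spin_unlock_irqrestore(&example_lock, flags);
  }

  /* After: process context only, interrupts known to be enabled. */
  static void example_after(void)
  {
          spin_lock_irq(&example_lock);
          /* ... critical section ... */
          spin_unlock_irq(&example_lock);
  }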
Signed-off-by: Sebastian Ott Reviewed-by: Ursula Braun Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 94e9620..b200f27 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1160,7 +1160,6 @@ int qdio_shutdown(struct ccw_device *cdev, int how) struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct subchannel_id schid; int rc; - unsigned long flags; if (!irq_ptr) return -ENODEV; @@ -1190,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) qdio_shutdown_debug_entries(irq_ptr); /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + spin_lock_irq(get_ccwdev_lock(cdev)); if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); @@ -1204,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) } qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + spin_unlock_irq(get_ccwdev_lock(cdev)); wait_event_interruptible_timeout(cdev->private->wait_q, irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || irq_ptr->state == QDIO_IRQ_STATE_ERR, 10 * HZ); - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + spin_lock_irq(get_ccwdev_lock(cdev)); no_cleanup: qdio_shutdown_thinint(irq_ptr); @@ -1217,7 +1216,7 @@ no_cleanup: /* restore interrupt handler */ if ((void *)cdev->handler == (void *)qdio_int_handler) cdev->handler = irq_ptr->orig_handler; - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + spin_unlock_irq(get_ccwdev_lock(cdev)); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); mutex_unlock(&irq_ptr->setup_mutex); @@ -1344,7 +1343,6 @@ int qdio_establish(struct qdio_initialize *init_data) struct ccw_device *cdev = init_data->cdev; struct subchannel_id schid; struct qdio_irq *irq_ptr; - unsigned long saveflags; int rc; ccw_device_get_schid(cdev, &schid); @@ -1373,7 +1371,7 @@ int qdio_establish(struct qdio_initialize *init_data) irq_ptr->ccw.count = irq_ptr->equeue.count; irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options_mask(cdev, 0); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); @@ -1381,7 +1379,7 @@ int qdio_establish(struct qdio_initialize *init_data) DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { mutex_unlock(&irq_ptr->setup_mutex); @@ -1422,7 +1420,6 @@ int qdio_activate(struct ccw_device *cdev) struct subchannel_id schid; struct qdio_irq *irq_ptr; int rc; - unsigned long saveflags; ccw_device_get_schid(cdev, &schid); DBF_EVENT("qactivate:%4x", schid.sch_no); @@ -1445,7 +1442,7 @@ int qdio_activate(struct ccw_device *cdev) irq_ptr->ccw.count = irq_ptr->aqueue.count; irq_ptr->ccw.cda = 0; - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, @@ -1454,7 +1451,7 @@ int qdio_activate(struct ccw_device *cdev) DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) goto out; -- cgit v0.10.2 From ddebf6612c5dcc479725867b9fd9a4d98f41350f Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 29 Jul 2016 14:00:27 +0200 
Subject: s390/qdio: fix double return code evaluation qdio sometimes checks return codes twice. First with the ccw device's lock held and then a 2nd time after the lock is released. Simplify the code by releasing the lock earlier and unify the return code evaluation. Signed-off-by: Sebastian Ott Reviewed-by: Ursula Braun Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index b200f27..da96978 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1375,13 +1375,10 @@ int qdio_establish(struct qdio_initialize *init_data) ccw_device_set_options_mask(cdev, 0); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irq(get_ccwdev_lock(cdev)); - - if (rc) { mutex_unlock(&irq_ptr->setup_mutex); qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; @@ -1447,14 +1444,12 @@ int qdio_activate(struct ccw_device *cdev) rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irq(get_ccwdev_lock(cdev)); - - if (rc) goto out; + } if (is_thinint_irq(irq_ptr)) tiqdio_add_input_queues(irq_ptr); -- cgit v0.10.2 From 6e30c549f6cab7a41d0934cea80822a2211132ae Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Tue, 2 Aug 2016 16:01:39 +0200 Subject: s390/qdio: remove checks for ccw device internal state Prior to starting IO qdio checks for the internal state of the ccw device. These checks happen without locking, so consistency between state evaluation and starting of the IO is not guaranteed. Since the internal state is checked during ccw_device_start it is safe to get rid of these additional checks. Signed-off-by: Sebastian Ott Reviewed-by: Cornelia Huck Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index da96978..fda3b26 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1352,9 +1352,6 @@ int qdio_establish(struct qdio_initialize *init_data) if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - mutex_lock(&irq_ptr->setup_mutex); qdio_setup_irq(init_data); @@ -1425,9 +1422,6 @@ int qdio_activate(struct ccw_device *cdev) if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { rc = -EBUSY; -- cgit v0.10.2 From 9bce8b2cbebf9f11b41021ccb98c6b18d1796edd Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 5 Aug 2016 12:33:10 +0200 Subject: s390/qdio: avoid reschedule of outbound tasklet once killed During qdio_shutdown the queue tasklets are killed for all inbound and outbound queues. The queue structures might be freed after qdio_shutdown. Thus it must be guaranteed that these queue tasklets are not rescheduled after that. In addition the outbound queue timers are deleted and it must be guaranteed that these timers are not restarted after qdio_shutdown processing. Timer deletion should make use of del_timer_sync() to make sure qdio_outbound_timer() is finished on other CPUs as well. Queue tasklets should be scheduled in state QDIO_IRQ_STATE_ACTIVE only. 
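A rough sketch of the teardown pattern described above, with a hypothetical structure and state value (the real helper added by this patch is qdio_tasklet_schedule() in the diff below): scheduling is gated on the active state, and shutdown uses the _sync/_kill variants so nothing can be re-armed afterwards.

  #include <linux/errno.h>
  #include <linux/interrupt.h>
  #include <linux/timer.h>

  #define EXAMPLE_STATE_ACTIVE 1

  struct example_queue {
          int state;
          struct tasklet_struct tasklet;
          struct timer_list timer;
  };

  /* Only (re)schedule while the queue is active, so a killed tasklet
   * cannot be brought back to life during shutdown. */
  static int example_tasklet_schedule(struct example_queue *q)
  {
          if (likely(q->state == EXAMPLE_STATE_ACTIVE)) {
                  tasklet_schedule(&q->tasklet);
                  return 0;
          }
          return -EPERM;
  }

  static void example_shutdown(struct example_queue *q)
  {
          q->state = 0;                  /* stop new scheduling first */
          del_timer_sync(&q->timer);     /* wait out a running timer */
          tasklet_kill(&q->tasklet);     /* wait out a running tasklet */
  }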
Signed-off-by: Ursula Braun Reviewed-by: Benjamin Block Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index fda3b26..71bf9bd 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q) q->qdio_error = 0; } +static inline int qdio_tasklet_schedule(struct qdio_q *q) +{ + if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { + tasklet_schedule(&q->tasklet); + return 0; + } + return -EPERM; +} + static void __qdio_inbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_inbound); @@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q) if (!qdio_inbound_q_done(q)) { /* means poll time is not yet over */ qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); + if (!qdio_tasklet_schedule(q)) return; - } } qdio_stop_polling(q); @@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q) * is noticed and outbound_handler is called after some time. */ if (qdio_outbound_q_done(q)) - del_timer(&q->u.out.timer); + del_timer_sync(&q->u.out.timer); else - if (!timer_pending(&q->u.out.timer)) + if (!timer_pending(&q->u.out.timer) && + likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) mod_timer(&q->u.out.timer, jiffies + 10 * HZ); return; sched: - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } /* outbound tasklet */ @@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) @@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) for_each_output_queue(q->irq_ptr, out, i) if (!qdio_outbound_q_done(out)) - tasklet_schedule(&out->tasklet); + qdio_tasklet_schedule(out); } static void __tiqdio_inbound_processing(struct qdio_q *q) @@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); + if (!qdio_tasklet_schedule(q)) return; - } } qdio_stop_polling(q); @@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) int i; struct qdio_q *q; - if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; for_each_input_queue(irq_ptr, q, i) { @@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) continue; if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) qdio_siga_sync_q(q); - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -1145,7 
+1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) tasklet_kill(&q->tasklet); for_each_output_queue(irq_ptr, q, i) { - del_timer(&q->u.out.timer); + del_timer_sync(&q->u.out.timer); tasklet_kill(&q->tasklet); } } @@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* in case of SIGA errors we must process the error immediately */ if (used >= q->u.out.scan_threshold || rc) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); else /* free the SBALs in case of no further traffic */ - if (!timer_pending(&q->u.out.timer)) + if (!timer_pending(&q->u.out.timer) && + likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) mod_timer(&q->u.out.timer, jiffies + HZ); return rc; } -- cgit v0.10.2 From f743e70e8f8f3200dde1dcf50c14aa2900d65071 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 3 Aug 2016 19:37:03 +0200 Subject: crypto: sha3 - Add missing ULL suffixes for 64-bit constants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On 32-bit (e.g. with m68k-linux-gnu-gcc-4.1): crypto/sha3_generic.c:27: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:28: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:29: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:29: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:31: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:31: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:32: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:32: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:32: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:33: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:33: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:34: warning: integer constant is too large for ‘long’ type crypto/sha3_generic.c:34: warning: integer constant is too large for ‘long’ type Fixes: 53964b9ee63b7075 ("crypto: sha3 - Add SHA-3 hash algorithm") Signed-off-by: Geert Uytterhoeven Signed-off-by: Herbert Xu diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 6226439..7e8ed96 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -24,14 +24,14 @@ #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) static const u64 keccakf_rndc[24] = { - 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, - 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, - 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, - 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, - 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, - 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, - 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, - 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 + 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, + 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL, + 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, + 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL, + 0x8000000000008080ULL, 
0x0000000080000001ULL, 0x8000000080008008ULL }; static const int keccakf_rotc[24] = { -- cgit v0.10.2 From 1d2d87e81ea21f64c19b95ef228b865a6880e17e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Thu, 4 Aug 2016 20:02:46 +0300 Subject: crypto: caam - fix echainiv(authenc) encrypt shared descriptor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are a few things missed by the conversion to the new AEAD interface: 1 - echainiv(authenc) encrypt shared descriptor The shared descriptor is incorrect: due to the order of operations, at some point in time MATH3 register is being overwritten. 2 - buffer used for echainiv(authenc) encrypt shared descriptor Encrypt and givencrypt shared descriptors (for AEAD ops) are mutually exclusive and thus use the same buffer in context state: sh_desc_enc. However, there's one place missed by s/sh_desc_givenc/sh_desc_enc, leading to errors when echainiv(authenc(...)) algorithms are used: DECO: desc idx 14: Header Error. Invalid length or parity, or certain other problems. While here, also fix a typo: dma_mapping_error() is checking for validity of sh_desc_givenc_dma instead of sh_desc_enc_dma. Cc: # 4.3+ Fixes: 479bcc7c5b9e ("crypto: caam - Convert authenc to new AEAD interface") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index ea8189f..e356005 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -614,7 +614,7 @@ skip_enc: keys_fit_inline = true; /* aead_givencrypt shared descriptor */ - desc = ctx->sh_desc_givenc; + desc = ctx->sh_desc_enc; /* Note: Context registers are saved. */ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); @@ -645,13 +645,13 @@ copy_iv: append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* ivsize + cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - /* Read and write assoclen bytes */ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + /* ivsize + cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + /* Skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); @@ -697,7 +697,7 @@ copy_iv: ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { dev_err(jrdev, "unable to map shared descriptor\n"); return -ENOMEM; } -- cgit v0.10.2 From 2fdea258fde036a87d3396ec9c0ef66f10768530 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Thu, 4 Aug 2016 20:02:47 +0300 Subject: crypto: caam - defer aead_set_sh_desc in case of zero authsize MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To be able to generate shared descriptors for AEAD, the authentication size needs to be known. However, there is no imposed order of calling .setkey, .setauthsize callbacks. Thus, in case authentication size is not known at .setkey time, defer it until .setauthsize is called. The authsize != 0 check was incorrectly removed when converting the driver to the new AEAD interface. 
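The deferral pattern itself is simple; the following is a hedged, driver-agnostic sketch with a hypothetical context structure (not the caam code): descriptor construction bails out until the authentication size has been supplied, and the setauthsize callback retries it once it is known.

  /* Hypothetical context; the real driver keeps authsize/enckeylen in its
   * own ctx, as visible in the diff below. */
  struct example_aead_ctx {
          unsigned int authsize;
          unsigned int enckeylen;
  };

  static int example_set_sh_desc(struct example_aead_ctx *ctx)
  {
          /* Authsize not known yet -- succeed and try again later. */
          if (!ctx->authsize)
                  return 0;
          /* ... build the shared descriptor from key + authsize ... */
          return 0;
  }

  static int example_setauthsize(struct example_aead_ctx *ctx,
                                 unsigned int authsize)
  {
          ctx->authsize = authsize;
          return example_set_sh_desc(ctx);  /* re-run now that it is known */
  }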
Cc: # 4.3+ Fixes: 479bcc7c5b9e ("crypto: caam - Convert authenc to new AEAD interface") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index e356005..6dc5971 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; + if (!ctx->authsize) + return 0; + /* NULL encryption / decryption */ if (!ctx->enckeylen) return aead_null_set_sh_desc(aead); -- cgit v0.10.2 From 68202c9f0ad6e16ee806fbadbc5838d55fe5aa5c Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Fri, 29 Jul 2016 14:59:12 -0600 Subject: libnvdimm, nd_blk: mask off reserved status bits The "NVDIMM Block Window Driver Writer's Guide": http://pmem.io/documents/NVDIMM_DriverWritersGuide-July-2016.pdf ...defines the layout of the block window status register. For the July 2016 version of the spec linked to above, this happens in Figure 4 on page 26. The only bits defined in this spec are bits 31, 5, 4, 2, 1 and 0. The rest of the bits in the status register are reserved, and there is a warning following the diagram that says: Note: The driver cannot assume the value of the RESERVED bits in the status register are zero. These reserved bits need to be masked off, and the driver must avoid checking the state of those bits. This change ensures that for hardware implementations that set these reserved bits in the status register, the driver won't incorrectly fail the block I/Os. Cc: #v4.2+ Reviewed-by: Lee, Chun-Yi Signed-off-by: Ross Zwisler Signed-off-by: Dan Williams diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 8c234dd..80cc7c0 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1527,11 +1527,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; u64 offset = nfit_blk->stat_offset + mmio->size * bw; + const u32 STATUS_MASK = 0x80000037; if (mmio->num_lines) offset = to_interleave_offset(offset, mmio); - return readl(mmio->addr.base + offset); + return readl(mmio->addr.base + offset) & STATUS_MASK; } static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, -- cgit v0.10.2 From abe8b4e3cef88b8202641d63f5ad58141b970b0f Mon Sep 17 00:00:00 2001 From: Vishal Verma Date: Wed, 27 Jul 2016 16:38:59 -0600 Subject: nvdimm, btt: add a size attribute for BTTs To be consistent with other namespaces, expose a 'size' attribute for BTT devices also. 
Cc: Dan Williams Reported-by: Linda Knippers Signed-off-by: Vishal Verma Signed-off-by: Dan Williams diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 88e9166..368795a 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1269,6 +1269,7 @@ static int btt_blk_init(struct btt *btt) } } set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); + btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; revalidate_disk(btt->btt_disk); return 0; diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 3fa7919..97dd292 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev, } static DEVICE_ATTR_RW(namespace); +static ssize_t size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_btt *nd_btt = to_nd_btt(dev); + ssize_t rc; + + device_lock(dev); + if (dev->driver) + rc = sprintf(buf, "%llu\n", nd_btt->size); + else { + /* no size to convey if the btt instance is disabled */ + rc = -ENXIO; + } + device_unlock(dev); + + return rc; +} +static DEVICE_ATTR_RO(size); + static struct attribute *nd_btt_attributes[] = { &dev_attr_sector_size.attr, &dev_attr_namespace.attr, &dev_attr_uuid.attr, + &dev_attr_size.attr, NULL, }; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 4047639..8024a0e 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -143,6 +143,7 @@ struct nd_btt { struct nd_namespace_common *ndns; struct btt *btt; unsigned long lbasize; + u64 size; u8 *uuid; int id; }; -- cgit v0.10.2 From 65ea11ec6a82b1d44aba62b59e9eb20247e57c6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 8 Aug 2016 20:35:29 +0300 Subject: x86/hweight: Don't clobber %rdi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The caller expects %rdi to remain intact, push+pop it make that happen. 
Fixes the following kind of explosions on my core2duo machine when trying to reboot or shut down: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: i915 i2c_algo_bit drm_kms_helper cfbfillrect syscopyarea cfbimgblt sysfillrect sysimgblt fb_sys_fops cfbcopyarea drm netconsole configfs binfmt_misc iTCO_wdt psmouse pcspkr snd_hda_codec_idt e100 coretemp hwmon snd_hda_codec_generic i2c_i801 mii i2c_smbus lpc_ich mfd_core snd_hda_intel uhci_hcd snd_hda_codec snd_hwdep snd_hda_core ehci_pci 8250 ehci_hcd snd_pcm 8250_base usbcore evdev serial_core usb_common parport_pc parport snd_timer snd soundcore CPU: 0 PID: 3070 Comm: reboot Not tainted 4.8.0-rc1-perf-dirty #69 Hardware name: /D946GZIS, BIOS TS94610J.86A.0087.2007.1107.1049 11/07/2007 task: ffff88012a0b4080 task.stack: ffff880123850000 RIP: 0010:[] [] x86_perf_event_update+0x52/0xc0 RSP: 0018:ffff880123853b60 EFLAGS: 00010087 RAX: 0000000000000001 RBX: ffff88012fc0a3c0 RCX: 000000000000001e RDX: 0000000000000000 RSI: 0000000040000000 RDI: ffff88012b014800 RBP: ffff880123853b88 R08: ffffffffffffffff R09: 0000000000000000 R10: ffffea0004a012c0 R11: ffffea0004acedc0 R12: ffffffff80000001 R13: ffff88012b0149c0 R14: ffff88012b014800 R15: 0000000000000018 FS: 00007f8b155cd700(0000) GS:ffff88012fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f8b155f5000 CR3: 000000012a2d7000 CR4: 00000000000006f0 Stack: ffff88012fc0a3c0 ffff88012b014800 0000000000000004 0000000000000001 ffff88012fc1b750 ffff880123853bb0 ffffffff81003d59 ffff88012b014800 ffff88012fc0a3c0 ffff88012b014800 ffff880123853bd8 ffffffff81003e13 Call Trace: [] x86_pmu_stop+0x59/0xd0 [] x86_pmu_del+0x43/0x140 [] event_sched_out.isra.105+0xbd/0x260 [] __perf_remove_from_context+0x2d/0xb0 [] __perf_event_exit_context+0x4d/0x70 [] generic_exec_single+0xb6/0x140 [] ? __perf_remove_from_context+0xb0/0xb0 [] ? __perf_remove_from_context+0xb0/0xb0 [] smp_call_function_single+0xdf/0x140 [] perf_event_exit_cpu_context+0x87/0xc0 [] perf_reboot+0x13/0x40 [] notifier_call_chain+0x4a/0x70 [] __blocking_notifier_call_chain+0x47/0x60 [] blocking_notifier_call_chain+0x16/0x20 [] kernel_restart_prepare+0x1d/0x40 [] kernel_restart+0x12/0x60 [] SYSC_reboot+0xf6/0x1b0 [] ? mntput_no_expire+0x2c/0x1b0 [] ? mntput+0x24/0x40 [] ? __fput+0x16c/0x1e0 [] ? ____fput+0xe/0x10 [] ? task_work_run+0x83/0xa0 [] ? exit_to_usermode_loop+0x53/0xc0 [] ? trace_hardirqs_on_thunk+0x1a/0x1c [] SyS_reboot+0xe/0x10 [] entry_SYSCALL_64_fastpath+0x18/0xa3 Code: 7c 4c 8d af c0 01 00 00 49 89 fe eb 10 48 09 c2 4c 89 e0 49 0f b1 55 00 4c 39 e0 74 35 4d 8b a6 c0 01 00 00 41 8b 8e 60 01 00 00 <0f> 33 8b 35 6e 02 8c 00 48 c1 e2 20 85 f6 7e d2 48 89 d3 89 cf RIP [] x86_perf_event_update+0x52/0xc0 RSP ---[ end trace 7ec95181faf211be ]--- note: reboot[3070] exited with preempt_count 2 Cc: Borislav Petkov Cc: H. 
Peter Anvin Cc: Andy Lutomirski Cc: Brian Gerst Cc: Denys Vlasenko Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Fixes: f5967101e9de ("x86/hweight: Get rid of the special calling convention") Signed-off-by: Ville Syrjälä Signed-off-by: Linus Torvalds diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S index 02de3d7..8a602a1 100644 --- a/arch/x86/lib/hweight.S +++ b/arch/x86/lib/hweight.S @@ -35,6 +35,7 @@ ENDPROC(__sw_hweight32) ENTRY(__sw_hweight64) #ifdef CONFIG_X86_64 + pushq %rdi pushq %rdx movq %rdi, %rdx # w -> t @@ -60,6 +61,7 @@ ENTRY(__sw_hweight64) shrq $56, %rax # w = w_tmp >> 56 popq %rdx + popq %rdi ret #else /* CONFIG_X86_32 */ /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ -- cgit v0.10.2 From 65a97a67a7d7b7a953492573d5740d32510b9daa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 27 Jul 2016 09:34:42 +0200 Subject: dell-wmi: Ignore WMI event 0xe00e MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit WMI event 0xe00e is received when battery was removed or inserted. Signed-off-by: Pali Rohár Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index d2bc092..da2fe18 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -110,8 +110,8 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = { /* BIOS error detected */ { KE_IGNORE, 0xe00d, { KEY_RESERVED } }, - /* Unknown, defined in ACPI DSDT */ - /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */ + /* Battery was removed or inserted */ + { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, /* Wifi Catcher */ { KE_KEY, 0xe011, { KEY_PROG2 } }, -- cgit v0.10.2 From 574673c231a5fad1560249cc3a598907acb36cf9 Mon Sep 17 00:00:00 2001 From: Andreas Ziegler Date: Thu, 4 Aug 2016 09:52:09 +0200 Subject: printk: Remove unnecessary #ifdef CONFIG_PRINTK In commit 874f9c7da9a4 ("printk: create pr_ functions"), new pr_level defines were added to printk.c. These new defines are guarded by an #ifdef CONFIG_PRINTK - however, there is already a surrounding #ifdef CONFIG_PRINTK starting a lot earlier in line 249 which means the newly introduced #ifdef is unnecessary. Let's remove it to avoid confusion. Signed-off-by: Andreas Ziegler Cc: Joe Perches Cc: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a5ef95c..a37fc8c 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1930,7 +1930,6 @@ asmlinkage int printk_emit(int facility, int level, } EXPORT_SYMBOL(printk_emit); -#ifdef CONFIG_PRINTK #define define_pr_level(func, loglevel) \ asmlinkage __visible void func(const char *fmt, ...) \ { \ @@ -1949,7 +1948,6 @@ define_pr_level(__pr_err, LOGLEVEL_ERR); define_pr_level(__pr_warn, LOGLEVEL_WARNING); define_pr_level(__pr_notice, LOGLEVEL_NOTICE); define_pr_level(__pr_info, LOGLEVEL_INFO); -#endif int vprintk_default(int level, const char *fmt, va_list args) { -- cgit v0.10.2 From c22e853a2ed19321d00c1eae339ffdc4f5e7757e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 30 Jul 2016 00:37:57 +0000 Subject: libceph: fix return value check in alloc_msg_with_page_vector() In case of error, the function ceph_alloc_page_vector() returns ERR_PTR() and never returns NULL. The NULL test in the return value check should be replaced with IS_ERR(). 
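For context, a small sketch of the ERR_PTR() convention this fix relies on, using a hypothetical alloc_thing() helper (not libceph code): such functions encode an errno in the returned pointer instead of returning NULL, so callers must test with IS_ERR() and decode with PTR_ERR().

  #include <linux/err.h>
  #include <linux/slab.h>

  /* Hypothetical allocator in the same style as ceph_alloc_page_vector(). */
  static void *alloc_thing(gfp_t flags)
  {
          void *p = kmalloc(128, flags);

          if (!p)
                  return ERR_PTR(-ENOMEM);  /* never NULL on failure */
          return p;
  }

  static int use_thing(void)
  {
          void *p = alloc_thing(GFP_NOIO);

          if (IS_ERR(p))            /* correct check for this convention */
                  return PTR_ERR(p);
          /* ... use p ... */
          kfree(p);
          return 0;
  }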
Fixes: 1907920324f1 ('libceph: support for sending notifies') Signed-off-by: Wei Yongjun Signed-off-by: Ilya Dryomov diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index b5ec096..a97e7b5 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), GFP_NOIO); - if (!pages) { + if (IS_ERR(pages)) { ceph_msg_put(m); return NULL; } -- cgit v0.10.2 From f52ec33cbd848632559c87c9305a70fb6eb97f18 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 30 Jul 2016 00:37:31 +0000 Subject: libceph: make cancel_generic_request() static Fixes the following sparse warning: net/ceph/mon_client.c:577:6: warning: symbol 'cancel_generic_request' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: Ilya Dryomov diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index c83326c..ef34a02 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req) put_generic_request(req); } -void cancel_generic_request(struct ceph_mon_generic_request *req) +static void cancel_generic_request(struct ceph_mon_generic_request *req) { struct ceph_mon_client *monc = req->monc; struct ceph_mon_generic_request *lookup_req; -- cgit v0.10.2 From 864364a29c26ed83b3eeca5fa278468dc3ae9ed4 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 30 Jul 2016 00:38:26 +0000 Subject: libceph: using kfree_rcu() to simplify the code The callback function of call_rcu() just calls a kfree(), so we can use kfree_rcu() instead of call_rcu() + callback function. Signed-off-by: Wei Yongjun Signed-off-by: Ilya Dryomov diff --git a/net/ceph/string_table.c b/net/ceph/string_table.c index ca53c83..22fb96e 100644 --- a/net/ceph/string_table.c +++ b/net/ceph/string_table.c @@ -84,12 +84,6 @@ retry: } EXPORT_SYMBOL(ceph_find_or_create_string); -static void ceph_free_string(struct rcu_head *head) -{ - struct ceph_string *cs = container_of(head, struct ceph_string, rcu); - kfree(cs); -} - void ceph_release_string(struct kref *ref) { struct ceph_string *cs = container_of(ref, struct ceph_string, kref); @@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref) } spin_unlock(&string_tree_lock); - call_rcu(&cs->rcu, ceph_free_string); + kfree_rcu(cs, rcu); } EXPORT_SYMBOL(ceph_release_string); -- cgit v0.10.2 From e4d2b16a445f85b7dc3fc67b21756eca515e0c74 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 4 Aug 2016 08:43:33 +0800 Subject: ceph: fix null pointer dereference in ceph_flush_snaps() Signed-off-by: Yan, Zheng diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 99115ca..16e6ded 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1347,9 +1347,12 @@ void ceph_flush_snaps(struct ceph_inode_info *ci, { struct inode *inode = &ci->vfs_inode; struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; - struct ceph_mds_session *session = *psession; + struct ceph_mds_session *session = NULL; int mds; + dout("ceph_flush_snaps %p\n", inode); + if (psession) + session = *psession; retry: spin_lock(&ci->i_ceph_lock); if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { -- cgit v0.10.2 From 6b6dddbe11b13bb00e0f9a1af2021e266811be85 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 5 Aug 2016 16:15:38 +0200 Subject: rbd: destroy header_oloc in rbd_dev_release() Purely cosmetic at this point, as rbd doesn't use RADOS namespaces and hence 
rbd_dev->header_oloc->pool_ns is always NULL. Signed-off-by: Ilya Dryomov diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1a04af6..07668a6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev) bool need_put = !!rbd_dev->opts; ceph_oid_destroy(&rbd_dev->header_oid); + ceph_oloc_destroy(&rbd_dev->header_oloc); rbd_put_client(rbd_dev->rbd_client); rbd_spec_put(rbd_dev->spec); -- cgit v0.10.2 From dca3f53c02e325bb19dfc05b1571b9a706226fea Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 4 Aug 2016 12:11:55 +0200 Subject: sctp: Export struct sctp_info to userspace This is required to correctly interpret INET_DIAG_INFO messages exported by sctp_diag module. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller diff --git a/include/linux/sctp.h b/include/linux/sctp.h index de1f643..fcb4c36 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk { sctp_authhdr_t auth_hdr; } __packed sctp_auth_chunk_t; -struct sctp_info { - __u32 sctpi_tag; - __u32 sctpi_state; - __u32 sctpi_rwnd; - __u16 sctpi_unackdata; - __u16 sctpi_penddata; - __u16 sctpi_instrms; - __u16 sctpi_outstrms; - __u32 sctpi_fragmentation_point; - __u32 sctpi_inqueue; - __u32 sctpi_outqueue; - __u32 sctpi_overall_error; - __u32 sctpi_max_burst; - __u32 sctpi_maxseg; - __u32 sctpi_peer_rwnd; - __u32 sctpi_peer_tag; - __u8 sctpi_peer_capable; - __u8 sctpi_peer_sack; - __u16 __reserved1; - - /* assoc status info */ - __u64 sctpi_isacks; - __u64 sctpi_osacks; - __u64 sctpi_opackets; - __u64 sctpi_ipackets; - __u64 sctpi_rtxchunks; - __u64 sctpi_outofseqtsns; - __u64 sctpi_idupchunks; - __u64 sctpi_gapcnt; - __u64 sctpi_ouodchunks; - __u64 sctpi_iuodchunks; - __u64 sctpi_oodchunks; - __u64 sctpi_iodchunks; - __u64 sctpi_octrlchunks; - __u64 sctpi_ictrlchunks; - - /* primary transport info */ - struct sockaddr_storage sctpi_p_address; - __s32 sctpi_p_state; - __u32 sctpi_p_cwnd; - __u32 sctpi_p_srtt; - __u32 sctpi_p_rto; - __u32 sctpi_p_hbinterval; - __u32 sctpi_p_pathmaxrxt; - __u32 sctpi_p_sackdelay; - __u32 sctpi_p_sackfreq; - __u32 sctpi_p_ssthresh; - __u32 sctpi_p_partial_bytes_acked; - __u32 sctpi_p_flight_size; - __u16 sctpi_p_error; - __u16 __reserved2; - - /* sctp sock info */ - __u32 sctpi_s_autoclose; - __u32 sctpi_s_adaptation_ind; - __u32 sctpi_s_pd_point; - __u8 sctpi_s_nodelay; - __u8 sctpi_s_disable_fragments; - __u8 sctpi_s_v4mapped; - __u8 sctpi_s_frag_interleave; - __u32 sctpi_s_type; - __u32 __reserved3; -}; - struct sctp_infox { struct sctp_info *sctpinfo; struct sctp_association *asoc; diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index d304f4c..a406adc 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -944,4 +944,68 @@ struct sctp_default_prinfo { __u16 pr_policy; }; +struct sctp_info { + __u32 sctpi_tag; + __u32 sctpi_state; + __u32 sctpi_rwnd; + __u16 sctpi_unackdata; + __u16 sctpi_penddata; + __u16 sctpi_instrms; + __u16 sctpi_outstrms; + __u32 sctpi_fragmentation_point; + __u32 sctpi_inqueue; + __u32 sctpi_outqueue; + __u32 sctpi_overall_error; + __u32 sctpi_max_burst; + __u32 sctpi_maxseg; + __u32 sctpi_peer_rwnd; + __u32 sctpi_peer_tag; + __u8 sctpi_peer_capable; + __u8 sctpi_peer_sack; + __u16 __reserved1; + + /* assoc status info */ + __u64 sctpi_isacks; + __u64 sctpi_osacks; + __u64 sctpi_opackets; + __u64 sctpi_ipackets; + __u64 sctpi_rtxchunks; + __u64 sctpi_outofseqtsns; + __u64 sctpi_idupchunks; + __u64 
sctpi_gapcnt; + __u64 sctpi_ouodchunks; + __u64 sctpi_iuodchunks; + __u64 sctpi_oodchunks; + __u64 sctpi_iodchunks; + __u64 sctpi_octrlchunks; + __u64 sctpi_ictrlchunks; + + /* primary transport info */ + struct sockaddr_storage sctpi_p_address; + __s32 sctpi_p_state; + __u32 sctpi_p_cwnd; + __u32 sctpi_p_srtt; + __u32 sctpi_p_rto; + __u32 sctpi_p_hbinterval; + __u32 sctpi_p_pathmaxrxt; + __u32 sctpi_p_sackdelay; + __u32 sctpi_p_sackfreq; + __u32 sctpi_p_ssthresh; + __u32 sctpi_p_partial_bytes_acked; + __u32 sctpi_p_flight_size; + __u16 sctpi_p_error; + __u16 __reserved2; + + /* sctp sock info */ + __u32 sctpi_s_autoclose; + __u32 sctpi_s_adaptation_ind; + __u32 sctpi_s_pd_point; + __u8 sctpi_s_nodelay; + __u8 sctpi_s_disable_fragments; + __u8 sctpi_s_v4mapped; + __u8 sctpi_s_frag_interleave; + __u32 sctpi_s_type; + __u32 __reserved3; +}; + #endif /* _UAPI_SCTP_H */ -- cgit v0.10.2 From 12474e8e58d82e2b815cd35956c0c06fab104ee7 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 4 Aug 2016 12:11:56 +0200 Subject: sctp_diag: Fix T3_rtx timer export The asoc's timer value is not kept in asoc->timeouts array but in it's primary transport instead. Furthermore, we must export the timer only if it is pending, otherwise the value will underrun when stored in an unsigned variable and user space will only see a very large timeout value. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index f69edcf..f728ef0 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -13,6 +13,7 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, { union sctp_addr laddr, paddr; struct dst_entry *dst; + struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer; laddr = list_entry(asoc->base.bind_addr.address_list.next, struct sctp_sockaddr_entry, list)->a; @@ -40,10 +41,15 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, } r->idiag_state = asoc->state; - r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; - r->idiag_retrans = asoc->rtx_data_chunks; - r->idiag_expires = jiffies_to_msecs( - asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies); + if (timer_pending(t3_rtx)) { + r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; + r->idiag_retrans = asoc->rtx_data_chunks; + r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies); + } else { + r->idiag_timer = 0; + r->idiag_retrans = 0; + r->idiag_expires = 0; + } } static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, -- cgit v0.10.2 From 1ba8d77f410d3b7423df39e8a10a02af272d3c42 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 4 Aug 2016 12:11:57 +0200 Subject: sctp_diag: Respect ss adding TCPF_CLOSE to idiag_states Since 'ss' always adds TCPF_CLOSE to idiag_states flags, sctp_diag can't rely upon TCPF_LISTEN flag solely being present when listening sockets are requested. Signed-off-by: Phil Sutter Signed-off-by: David S. 
Miller diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index f728ef0..bb69153 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -356,7 +356,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) if (cb->args[4] < cb->args[1]) goto next; - if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) + if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs)) goto next; if (r->sdiag_family != AF_UNSPEC && @@ -471,7 +471,7 @@ skip: * 3 : to mark if we have dumped the ep info of the current asoc * 4 : to work as a temporary variable to traversal list */ - if (!(idiag_states & ~TCPF_LISTEN)) + if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE))) goto done; sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); done: -- cgit v0.10.2 From 3b3bf80b994f0c6c35a25ef8965ab956b4bcced5 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 4 Aug 2016 12:37:17 +0200 Subject: rhashtable-test: Fix max_size parameter description Looks like a simple copy'n'paste error. Fixes: 1aa661f5c3df1 ("rhashtable-test: Measure time to insert, remove & traverse entries") Signed-off-by: Phil Sutter Signed-off-by: David S. Miller diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 297fdb5..64e899b 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); static int max_size = 0; module_param(max_size, int, 0); -MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)"); +MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)"); static bool shrinking = false; module_param(shrinking, bool, 0); -- cgit v0.10.2 From b489a2000f19e414710d2887fe3e24e903242766 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 4 Aug 2016 17:36:20 +0300 Subject: mlxsw: spectrum: Do not assume PAUSE frames are disabled When ieee_setpfc() gets called, PAUSE frames are not necessarily disabled on the port. Check if PAUSE frames are disabled or enabled and configure the port's headroom buffer accordingly. Fixes: d81a6bdb87ce ("mlxsw: spectrum: Add IEEE 802.1Qbb PFC support") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb75..3c4a178 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -351,17 +351,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); int err; - if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && - pfc->pfc_en) { + if (pause_en && pfc->pfc_en) { netdev_err(dev, "PAUSE frames already enabled on port\n"); return -EINVAL; } err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, mlxsw_sp_port->dcb.ets->prio_tc, - false, pfc); + pause_en, pfc); if (err) { netdev_err(dev, "Failed to configure port's headroom for PFC\n"); return err; @@ -380,7 +380,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, err_port_pfc_set: __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, - mlxsw_sp_port->dcb.ets->prio_tc, false, + mlxsw_sp_port->dcb.ets->prio_tc, pause_en, mlxsw_sp_port->dcb.pfc); return err; } -- cgit v0.10.2 From 07d50cae0661e5479d54d6e3e21cad15b1198103 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 4 Aug 2016 17:36:21 +0300 Subject: mlxsw: spectrum: Do not override PAUSE settings The PFCC register is used to configure both PAUSE and PFC frames. Therefore, when PFC frames are disabled we must make sure we don't mistakenly also disable PAUSE frames (which might be enabled). Fix this by packing the PFCC register with the current PAUSE settings. Note that this register is also accessed via ethtool ops, but there we are guaranteed to have PFC disabled. Fixes: d81a6bdb87ce ("mlxsw: spectrum: Add IEEE 802.1Qbb PFC support") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 3c4a178..b6ed7f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, char pfcc_pl[MLXSW_REG_PFCC_LEN]; mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), -- cgit v0.10.2 From 4de34eb5743f720dc4798f0647f75c21d44aa1f8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 4 Aug 2016 17:36:22 +0300 Subject: mlxsw: spectrum: Add missing DCB rollback in error path We correctly execute mlxsw_sp_port_dcb_fini() when port is removed, but I missed its rollback in the error path of port creation, so add it. Fixes: f00817df2b42 ("mlxsw: spectrum: Introduce support for Data Center Bridging (DCB)") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3e6150..e1b8f62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2220,6 +2220,7 @@ err_port_vlan_init: err_core_port_init: unregister_netdev(dev); err_register_netdev: + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); err_port_dcb_init: err_port_ets_init: err_port_buffers_init: -- cgit v0.10.2 From 1bd4403d86a1c06cb6cc9ac87664a0c9d3413d51 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 8 Aug 2016 13:02:01 -0700 Subject: unsafe_[get|put]_user: change interface to use a error target label When I initially added the unsafe_[get|put]_user() helpers in commit 5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses"), I made the mistake of modeling the interface on our traditional __[get|put]_user() functions, which return zero on success, or -EFAULT on failure. That interface is fairly easy to use, but it's actually fairly nasty for good code generation, since it essentially forces the caller to check the error value for each access. In particular, since the error handling is already internally implemented with an exception handler, and we already use "asm goto" for various other things, we could fairly easily make the error cases just jump directly to an error label instead, and avoid the need for explicit checking after each operation. So switch the interface to pass in an error label, rather than checking the error value in the caller. Best do it now before we start growing more users (the signal handling code in particular would be a good place to use the new interface). So rather than if (unsafe_get_user(x, ptr)) ... handle error .. the interface is now unsafe_get_user(x, ptr, label); where an error during the user mode fetch will now just cause a jump to 'label' in the caller. Right now the actual _implementation_ of this all still ends up being a "if (err) goto label", and does not take advantage of any exception label tricks, but for "unsafe_put_user()" in particular it should be fairly straightforward to convert to using the exception table model. Note that "unsafe_get_user()" is much harder to convert to a clever exception table model, because current versions of gcc do not allow the use of "asm goto" (for the exception) with output values (for the actual value to be fetched). But that is hopefully not a limitation in the long term. 
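To make the new calling convention concrete, a minimal caller might look like the sketch below. This is illustrative only and not part of the patch; it mirrors the pattern the patch applies in lib/strncpy_from_user.c, with the x86 user_access_begin()/user_access_end() pair (which took no arguments at this point) bracketing the unsafe accessor.

	/* Hypothetical helper: fetch one word from user space, no per-access error checks. */
	static int get_one_word(unsigned long *dst, const unsigned long __user *src)
	{
		unsigned long val;

		user_access_begin();			/* e.g. __uaccess_begin()/STAC on x86 */
		unsafe_get_user(val, src, efault);	/* a fault jumps straight to the label */
		user_access_end();

		*dst = val;
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}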
[ Also note that it might be a good idea to switch unsafe_get_user() to actually _return_ the value it fetches from user space, but this commit only changes the error handling semantics ] Signed-off-by: Linus Torvalds diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index c03bfb6..52f2300 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -812,21 +812,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n) #define user_access_begin() __uaccess_begin() #define user_access_end() __uaccess_end() -#define unsafe_put_user(x, ptr) \ -({ \ +#define unsafe_put_user(x, ptr, err_label) \ +do { \ int __pu_err; \ __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ - __builtin_expect(__pu_err, 0); \ -}) + if (unlikely(__pu_err)) goto err_label; \ +} while (0) -#define unsafe_get_user(x, ptr) \ -({ \ +#define unsafe_get_user(x, ptr, err_label) \ +do { \ int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ - __builtin_expect(__gu_err, 0); \ -}) + if (unlikely(__gu_err)) goto err_label; \ +} while (0) #endif /* _ASM_X86_UACCESS_H */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 3495578..f30c187 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #ifndef user_access_begin #define user_access_begin() do { } while (0) #define user_access_end() do { } while (0) -#define unsafe_get_user(x, ptr) __get_user(x, ptr) -#define unsafe_put_user(x, ptr) __put_user(x, ptr) +#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) +#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) #endif #endif /* __LINUX_UACCESS_H__ */ diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 33f655e..9c5fe81 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long unsigned long c, data; /* Fall back to byte-at-a-time if we get a page fault */ - if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) - break; + unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); + *(unsigned long *)(dst+res) = c; if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); @@ -56,8 +56,7 @@ byte_at_a_time: while (max) { char c; - if (unlikely(unsafe_get_user(c,src+res))) - return -EFAULT; + unsafe_get_user(c,src+res, efault); dst[res] = c; if (!c) return res; @@ -76,6 +75,7 @@ byte_at_a_time: * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's an EFAULT. 
*/ +efault: return -EFAULT; } diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 2625943..8e105ed 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, src -= align; max += align; - if (unlikely(unsafe_get_user(c,(unsigned long __user *)src))) - return 0; + unsafe_get_user(c, (unsigned long __user *)src, efault); c |= aligned_byte_mask(align); for (;;) { @@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, if (unlikely(max <= sizeof(unsigned long))) break; max -= sizeof(unsigned long); - if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) - return 0; + unsafe_get_user(c, (unsigned long __user *)(src+res), efault); } res -= align; @@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's 0. */ +efault: return 0; } -- cgit v0.10.2 From e4630fdd47637168927905983205d7b7c5c08c09 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 8 Aug 2016 15:31:31 +0200 Subject: x86/power/64: Always create temporary identity mapping correctly The low-level resume-from-hibernation code on x86-64 uses kernel_ident_mapping_init() to create the temporary identity mapping, but that function assumes that the offset between kernel virtual addresses and physical addresses is aligned on the PGD level. However, with a randomized identity mapping base, it may be aligned on the PUD level and if that happens, the temporary identity mapping created by set_up_temporary_mappings() will not reflect the actual kernel identity mapping and the image restoration will fail as a result (leading to a kernel panic most of the time). To fix this problem, rework kernel_ident_mapping_init() to support unaligned offsets between KVA and PA up to the PMD level and make set_up_temporary_mappings() use it as appropriate. Reported-and-tested-by: Thomas Garnier Reported-by: Borislav Petkov Suggested-by: Yinghai Lu Signed-off-by: Rafael J. Wysocki Acked-by: Yinghai Lu diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h index 2230420..737da62 100644 --- a/arch/x86/include/asm/init.h +++ b/arch/x86/include/asm/init.h @@ -5,10 +5,10 @@ struct x86_mapping_info { void *(*alloc_pgt_page)(void *); /* allocate buf for page table */ void *context; /* context for alloc_pgt_page */ unsigned long pmd_flag; /* page flag for PMD entry */ - bool kernel_mapping; /* kernel mapping or ident mapping */ + unsigned long offset; /* ident mapping offset */ }; int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, - unsigned long addr, unsigned long end); + unsigned long pstart, unsigned long pend); #endif /* _ASM_X86_INIT_H */ diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index ec21796..4473cb4 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -3,15 +3,17 @@ * included by both the compressed kernel and the regular kernel.
*/ -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page, +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page, unsigned long addr, unsigned long end) { addr &= PMD_MASK; for (; addr < end; addr += PMD_SIZE) { pmd_t *pmd = pmd_page + pmd_index(addr); - if (!pmd_present(*pmd)) - set_pmd(pmd, __pmd(addr | pmd_flag)); + if (pmd_present(*pmd)) + continue; + + set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag)); } } @@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, if (pud_present(*pud)) { pmd = pmd_offset(pud, 0); - ident_pmd_init(info->pmd_flag, pmd, addr, next); + ident_pmd_init(info, pmd, addr, next); continue; } pmd = (pmd_t *)info->alloc_pgt_page(info->context); if (!pmd) return -ENOMEM; - ident_pmd_init(info->pmd_flag, pmd, addr, next); + ident_pmd_init(info, pmd, addr, next); set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); } @@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, } int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, - unsigned long addr, unsigned long end) + unsigned long pstart, unsigned long pend) { + unsigned long addr = pstart + info->offset; + unsigned long end = pend + info->offset; unsigned long next; int result; - int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0; for (; addr < end; addr = next) { - pgd_t *pgd = pgd_page + pgd_index(addr) + off; + pgd_t *pgd = pgd_page + pgd_index(addr); pud_t *pud; next = (addr & PGDIR_MASK) + PGDIR_SIZE; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index f0b5f2d..a3e3ccc 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void) struct x86_mapping_info info = { .alloc_pgt_page = alloc_pgt_page, .pmd_flag = __PAGE_KERNEL_LARGE_EXEC, - .kernel_mapping = true, + .offset = __PAGE_OFFSET, }; unsigned long mstart, mend; pgd_t *pgd; -- cgit v0.10.2 From 66cf3504f4c528793b3d8986bab606f7cfb1c4bb Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 4 Aug 2016 16:07:58 -0400 Subject: net/ethernet: tundra: fix dump_eth_one warning in tsi108_eth The call site for this function appears as: #ifdef DEBUG data->msg_enable = DEBUG; dump_eth_one(dev); #endif ...leading to the following warning for !DEBUG builds: drivers/net/ethernet/tundra/tsi108_eth.c:169:13: warning: 'dump_eth_one' defined but not used [-Wunused-function] static void dump_eth_one(struct net_device *dev) ^ ...when using the arch/powerpc/configs/mpc7448_hpc2_defconfig Put the function definition under the same #ifdef as the call site to avoid the warning. Cc: "David S. Miller" Cc: netdev@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a7714..8fd1312 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { static void tsi108_timed_checker(unsigned long dev_ptr); +#ifdef DEBUG static void dump_eth_one(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); @@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) TSI_READ(TSI108_EC_RXESTAT), TSI_READ(TSI108_EC_RXERR), data->rxpending); } +#endif /* Synchronization is needed between the thread and up/down events. 
* Note that the PHY is accessed through the same registers for both -- cgit v0.10.2 From a2bfe6bf09a5f38df2bd0b1734f5fdee76f9366f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 5 Aug 2016 00:11:11 +0200 Subject: bpf: also call skb_postpush_rcsum on xmit occasions Follow-up to commit f8ffad69c9f8 ("bpf: add skb_postpush_rcsum and fix dev_forward_skb occasions") to fix an issue for dev_queue_xmit() redirect locations which need CHECKSUM_COMPLETE fixups on ingress. For the same reasons as described in f8ffad69c9f8 already, we of course also need this here, since dev_queue_xmit() on a veth device will let us end up in the dev_forward_skb() helper again to cross namespaces. Latter then calls into skb_postpull_rcsum() to pull out L2 header, so that netif_rx_internal() sees CHECKSUM_COMPLETE as it is expected. That is, CHECKSUM_COMPLETE on ingress covering L2 _payload_, not L2 headers. Also here we have to address bpf_redirect() and bpf_clone_redirect(). Fixes: 3896d655f4d4 ("bpf: introduce bpf_clone_redirect() helper") Fixes: 27b29f63058d ("bpf: add bpf_redirect() helper") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller diff --git a/net/core/filter.c b/net/core/filter.c index 5708999..c46244f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1365,6 +1365,12 @@ static inline int bpf_try_make_writable(struct sk_buff *skb, return err; } +static inline void bpf_push_mac_rcsum(struct sk_buff *skb) +{ + if (skb_at_tc_ingress(skb)) + skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); +} + static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) { struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); @@ -1607,9 +1613,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = { static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_at_tc_ingress(skb)) - skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); - return dev_forward_skb(dev, skb); } @@ -1648,6 +1651,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) if (unlikely(!skb)) return -ENOMEM; + bpf_push_mac_rcsum(skb); + return flags & BPF_F_INGRESS ? __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); } @@ -1693,6 +1698,8 @@ int skb_do_redirect(struct sk_buff *skb) return -EINVAL; } + bpf_push_mac_rcsum(skb); + return ri->flags & BPF_F_INGRESS ? __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); } -- cgit v0.10.2 From 479ffcccefd7b04442b0e949f8779b0612e8c75f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 5 Aug 2016 00:11:12 +0200 Subject: bpf: fix checksum fixups on bpf_skb_store_bytes bpf_skb_store_bytes() invocations above L2 header need BPF_F_RECOMPUTE_CSUM flag for updates, so that CHECKSUM_COMPLETE will be fixed up along the way. Where we ran into an issue with bpf_skb_store_bytes() is when we did a single-byte update on the IPv6 hoplimit despite using BPF_F_RECOMPUTE_CSUM flag; simple ping via ICMPv6 triggered a hw csum failure as a result. The underlying issue has been tracked down to a buffer alignment issue. Meaning, that csum_partial() computations via skb_postpull_rcsum() and skb_postpush_rcsum() pair invoked had a wrong result since they operated on an odd address for the hoplimit, while other computations were done on an even address. This mix doesn't work as-is with skb_postpull_rcsum(), skb_postpush_rcsum() pair as it always expects at least half-word alignment of input buffers, which is normally the case. 
Thus, instead of these helpers using csum_sub() and (implicitly) csum_add(), we need to use csum_block_sub(), csum_block_add(), respectively. For unaligned offsets, they rotate the sum to align it to a half-word boundary again, otherwise they work the same as csum_sub() and csum_add(). Adding __skb_postpull_rcsum(), __skb_postpush_rcsum() variants that take the offset as an input and adapting bpf_skb_store_bytes() to them fixes the hw csum failures again. The skb_postpull_rcsum(), skb_postpush_rcsum() helpers use a 0 constant for offset so that the compiler optimizes the offset & 1 test away and generates the same code as with csum_sub()/_add(). Fixes: 608cd71a9c7c ("tc: bpf: generalize pedit action") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6f0b3e0..0f665cb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb) __skb_linearize(skb) : 0; } +static __always_inline void +__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_block_sub(skb->csum, + csum_partial(start, len, 0), off); + else if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) < 0) + skb->ip_summed = CHECKSUM_NONE; +} + /** * skb_postpull_rcsum - update checksum for received skb after pull * @skb: buffer to update @@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb) * update the CHECKSUM_COMPLETE checksum, or set ip_summed to * CHECKSUM_NONE so that it can be recomputed from scratch. */ - static inline void skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); - else if (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_start_offset(skb) < 0) - skb->ip_summed = CHECKSUM_NONE; + __skb_postpull_rcsum(skb, start, len, 0); } -unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +static __always_inline void +__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_block_add(skb->csum, + csum_partial(start, len, 0), off); +} +/** + * skb_postpush_rcsum - update checksum for received skb after push + * @skb: buffer to update + * @start: start of data after push + * @len: length of data pushed + * + * After doing a push on a received packet, you need to call this to + * update the CHECKSUM_COMPLETE checksum. + */ static inline void skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - /* For performing the reverse operation to skb_postpull_rcsum(), - * we can instead of ... - * - * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); - * - * ... just use this equivalent version here to save a few - * instructions. Feeding csum of 0 in csum_partial() and later - * on adding skb->csum is equivalent to feed skb->csum in the - * first place. 
- */ - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_partial(start, len, skb->csum); + __skb_postpush_rcsum(skb, start, len, 0); } +unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); + /** * skb_push_rcsum - push skb and update receive checksum * @skb: buffer to update diff --git a/net/core/filter.c b/net/core/filter.c index c46244f..5ecd5c9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1401,7 +1401,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) return -EFAULT; if (flags & BPF_F_RECOMPUTE_CSUM) - skb_postpull_rcsum(skb, ptr, len); + __skb_postpull_rcsum(skb, ptr, len, offset); memcpy(ptr, from, len); @@ -1410,7 +1410,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) skb_store_bits(skb, offset, ptr, len); if (flags & BPF_F_RECOMPUTE_CSUM) - skb_postpush_rcsum(skb, ptr, len); + __skb_postpush_rcsum(skb, ptr, len, offset); if (flags & BPF_F_INVALIDATE_HASH) skb_clear_hash(skb); -- cgit v0.10.2 From 8065694e6519d234ccee93d952127e3f05b81ba5 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 5 Aug 2016 00:11:13 +0200 Subject: bpf: fix checksum for vlan push/pop helper When having skbs on ingress with CHECKSUM_COMPLETE, tc BPF programs don't push rcsum of mac header back in and after BPF run back pull out again as opposed to some other subsystems (ovs, for example). For cases like q-in-q, meaning when a vlan tag for offloading is already present and we're about to push another one, then skb_vlan_push() pushes the inner one into the skb, increasing mac header and skb_postpush_rcsum()'ing the 4 bytes vlan header diff. Likewise, for the reverse operation in skb_vlan_pop() for the case where vlan header needs to be pulled out of the skb, we're decreasing the mac header and skb_postpull_rcsum()'ing the 4 bytes rcsum of the vlan header that was removed. However mangling the rcsum here will lead to hw csum failure for BPF case, since we're pulling or pushing data that was not part of the current rcsum. Changing tc BPF programs in general to push/pull rcsum around BPF_PROG_RUN() is also not really an option since current behaviour is ABI by now, but apart from that would also mean to do quite a bit of useless work in the sense that usually 12 bytes need to be rcsum pushed/pulled also when we don't need to touch this vlan related corner case. One way to fix it would be to push the necessary rcsum fixup down into vlan helpers that are (mostly) slow-path anyway. Fixes: 4e10df9a60d9 ("bpf: introduce bpf_skb_vlan_push/pop() helpers") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller diff --git a/net/core/filter.c b/net/core/filter.c index 5ecd5c9..b5add4e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1371,6 +1371,12 @@ static inline void bpf_push_mac_rcsum(struct sk_buff *skb) skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); } +static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) +{ + if (skb_at_tc_ingress(skb)) + skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); +} + static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) { struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); @@ -1763,7 +1769,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) vlan_proto != htons(ETH_P_8021AD))) vlan_proto = htons(ETH_P_8021Q); + bpf_push_mac_rcsum(skb); ret = skb_vlan_push(skb, vlan_proto, vlan_tci); + bpf_pull_mac_rcsum(skb); + bpf_compute_data_end(skb); return ret; } @@ -1783,7 +1792,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) struct sk_buff *skb = (struct sk_buff *) (long) r1; int ret; + bpf_push_mac_rcsum(skb); ret = skb_vlan_pop(skb); + bpf_pull_mac_rcsum(skb); + bpf_compute_data_end(skb); return ret; } -- cgit v0.10.2 From 272d96a5ab10662691b4ec90c4a66fdbf30ea7ba Mon Sep 17 00:00:00 2001 From: pravin shelar Date: Fri, 5 Aug 2016 17:45:36 -0700 Subject: net: vxlan: lwt: Use source ip address during route lookup. LWT user can specify destination as well as source ip address for given tunnel endpoint. But vxlan is ignoring given source ip address. Following patch uses both ip address to route the tunnel packet. This consistent with other LWT implementations, like GENEVE and GRE. Fixes: ee122c79d42 ("vxlan: Flow based tunneling"). Signed-off-by: Pravin B Shelar Acked-by: Jiri Benc Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index da4e3d6..b812234 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = daddr; - fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; + fl4.saddr = *saddr; rt = ip_route_output_key(vxlan->net, &fl4); if (!IS_ERR(rt)) { @@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; fl6.daddr = *daddr; - fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; + fl6.saddr = *saddr; fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; @@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct rtable *rt = NULL; const struct iphdr *old_iph; union vxlan_addr *dst; - union vxlan_addr remote_ip; + union vxlan_addr remote_ip, local_ip; + union vxlan_addr *src; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; @@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = rdst->remote_vni; dst = &rdst->remote_ip; + src = &vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; } else { if (!info) { @@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; vni = vxlan_tun_id_to_vni(info->key.tun_id); remote_ip.sa.sa_family = ip_tunnel_info_af(info); - if (remote_ip.sa.sa_family == AF_INET) + if (remote_ip.sa.sa_family == AF_INET) { remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; - else + local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; + } else { remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; + local_ip.sin6.sin6_addr = info->key.u.ipv6.src; + } dst = &remote_ip; + src = &local_ip; dst_cache = &info->dst_cache; } @@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (dst->sa.sa_family == AF_INET) { - __be32 saddr; - if (!vxlan->vn4_sock) goto drop; sk = vxlan->vn4_sock->sock->sk; rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, - dst->sin.sin_addr.s_addr, &saddr, + dst->sin.sin_addr.s_addr, + &src->sin.sin_addr.s_addr, dst_cache, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", @@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (err < 0) goto xmit_tx_error; - udp_tunnel_xmit_skb(rt, sk, skb, saddr, + udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; - struct in6_addr saddr; u32 rt6i_flags; if (!vxlan->vn6_sock) @@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, - label, &dst->sin6.sin6_addr, &saddr, + label, &dst->sin6.sin6_addr, + &src->sin6.sin6_addr, dst_cache, info); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", @@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } udp_tunnel6_xmit_skb(ndst, sk, skb, dev, - &saddr, &dst->sin6.sin6_addr, tos, ttl, + &src->sin6.sin6_addr, + &dst->sin6.sin6_addr, tos, ttl, label, src_port, dst_port, !udp_sum); #endif } -- cgit v0.10.2 From bbec7802c6948c8626b71a4fe31283cb4691c358 Mon Sep 17 00:00:00 2001 From: pravin shelar Date: Fri, 5 Aug 2016 17:45:37 -0700 Subject: net: vxlan: lwt: Fix vxlan local traffic. vxlan driver has bypass for local vxlan traffic, but that depends on information about all VNIs on local system in vxlan driver. This is not available in case of LWT. Therefore following patch disable encap bypass for LWT vxlan traffic. Fixes: ee122c79d42 ("vxlan: Flow based tunneling"). Reported-by: Jakub Libosvar Signed-off-by: Pravin B Shelar Acked-by: Jiri Benc Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b812234..c0dda6f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2022,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } /* Bypass encapsulation if the destination is local */ - if (rt->rt_flags & RTCF_LOCAL && + if (!info && rt->rt_flags & RTCF_LOCAL && !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2082,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, /* Bypass encapsulation if the destination is local */ rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; - if (rt6i_flags & RTF_LOCAL && + if (!info && rt6i_flags & RTF_LOCAL && !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; -- cgit v0.10.2 From 1fe323aa1b2390a0c57fb0b06a782f128d49094c Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 7 Aug 2016 14:15:13 +0800 Subject: sctp: use event->chunk when it's valid Commit 52253db924d1 ("sctp: also point GSO head_skb to the sk when it's available") used event->chunk->head_skb to get the head_skb in sctp_ulpevent_set_owner(). But at that moment, the event->chunk was NULL, as it cloned the skb in sctp_ulpevent_make_rcvmsg(). Therefore, that patch didn't really work. This patch is to move the event->chunk initialization before calling sctp_ulpevent_receive_data() so that it uses event->chunk when it's valid. Fixes: 52253db924d1 ("sctp: also point GSO head_skb to the sk when it's available") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 1bc4f71..d85b803 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -702,14 +702,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, */ sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); - sctp_ulpevent_receive_data(event, asoc); - /* And hold the chunk as we need it for getting the IP headers * later in recvmsg */ sctp_chunk_hold(chunk); event->chunk = chunk; + sctp_ulpevent_receive_data(event, asoc); + event->stream = ntohs(chunk->subh.data_hdr->stream); event->ssn = ntohs(chunk->subh.data_hdr->ssn); event->ppid = chunk->subh.data_hdr->ppid; -- cgit v0.10.2 From c8952a707556e04374d7b2fdb3a079d63ddf6f2f Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 8 Aug 2016 16:16:23 -0600 Subject: vfio/pci: Fix NULL pointer oops in error interrupt setup handling There are multiple cases in vfio_pci_set_ctx_trigger_single() where we assume we can safely read from our data pointer without actually checking whether the user has passed any data via the count field. VFIO_IRQ_SET_DATA_NONE in particular is entirely broken since we attempt to pull an int32_t file descriptor out before even checking the data type. The other data types assume the data pointer contains one element of their type as well. In part this is good news because we were previously restricted from doing much sanitization of parameters because it was missed in the past and we didn't want to break existing users. Clearly DATA_NONE is completely broken, so it must not have any users and we can fix it up completely. For DATA_BOOL and DATA_EVENTFD, we'll just protect ourselves, returning error when count is zero since we previously would have oopsed. 
Signed-off-by: Alex Williamson Reported-by: Chris Thompson Cc: stable@vger.kernel.org Reviewed-by: Eric Auger diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 15ecfc9..152b438 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -564,67 +564,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, } static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, - uint32_t flags, void *data) + unsigned int count, uint32_t flags, + void *data) { - int32_t fd = *(int32_t *)data; - - if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) - return -EINVAL; - /* DATA_NONE/DATA_BOOL enables loopback testing */ if (flags & VFIO_IRQ_SET_DATA_NONE) { - if (*ctx) - eventfd_signal(*ctx, 1); - return 0; + if (*ctx) { + if (count) { + eventfd_signal(*ctx, 1); + } else { + eventfd_ctx_put(*ctx); + *ctx = NULL; + } + return 0; + } } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { - uint8_t trigger = *(uint8_t *)data; + uint8_t trigger; + + if (!count) + return -EINVAL; + + trigger = *(uint8_t *)data; if (trigger && *ctx) eventfd_signal(*ctx, 1); - return 0; - } - /* Handle SET_DATA_EVENTFD */ - if (fd == -1) { - if (*ctx) - eventfd_ctx_put(*ctx); - *ctx = NULL; return 0; - } else if (fd >= 0) { - struct eventfd_ctx *efdctx; - efdctx = eventfd_ctx_fdget(fd); - if (IS_ERR(efdctx)) - return PTR_ERR(efdctx); - if (*ctx) - eventfd_ctx_put(*ctx); - *ctx = efdctx; + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd; + + if (!count) + return -EINVAL; + + fd = *(int32_t *)data; + if (fd == -1) { + if (*ctx) + eventfd_ctx_put(*ctx); + *ctx = NULL; + } else if (fd >= 0) { + struct eventfd_ctx *efdctx; + + efdctx = eventfd_ctx_fdget(fd); + if (IS_ERR(efdctx)) + return PTR_ERR(efdctx); + + if (*ctx) + eventfd_ctx_put(*ctx); + + *ctx = efdctx; + } return 0; - } else - return -EINVAL; + } + + return -EINVAL; } static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, unsigned index, unsigned start, unsigned count, uint32_t flags, void *data) { - if (index != VFIO_PCI_ERR_IRQ_INDEX) + if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) return -EINVAL; - /* - * We should sanitize start & count, but that wasn't caught - * originally, so this IRQ index must forever ignore them :-( - */ - - return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data); + return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, + count, flags, data); } static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, unsigned index, unsigned start, unsigned count, uint32_t flags, void *data) { - if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1) + if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) return -EINVAL; - return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data); + return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, + count, flags, data); } int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, -- cgit v0.10.2 From e10aec652f31ec61d6a0b4d00d8ef8d2b66fa0fd Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 6 Jul 2016 12:05:44 +0200 Subject: drm/edid: Add 6 bpc quirk for display AEO model 0. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bugzilla https://bugzilla.kernel.org/show_bug.cgi?id=105331 reports that the "AEO model 0" display is driven with 8 bpc without dithering by default, which looks bad because that panel is apparently a 6 bpc DP panel with faulty EDID. 
A fix for this was made by commit 013dd9e03872 ("drm/i915/dp: fall back to 18 bpp when sink capability is unknown"). That commit triggers new regressions in precision for DP->DVI and DP->VGA displays. A patch is out to revert that commit, but it will revert video output for the AEO model 0 panel to 8 bpc without dithering. The EDID 1.3 of that panel, as decoded from the xrandr output attached to that bugzilla bug report, is somewhat faulty, and beyond other problems also sets the "DFP 1.x compliant TMDS" bit, which according to DFP spec means to drive the panel with 8 bpc and no dithering in absence of other colorimetry information. Try to make the original bug reporter happy despite the faulty EDID by adding a quirk to mark that panel as 6 bpc, so 6 bpc output with dithering creates a nice picture. Tested by injecting the edid from the fdo bug into a DP connector via drm_kms_helper.edid_firmware and verifying the 6 bpc + dithering is selected. This patch should be backported to stable. Signed-off-by: Mario Kleiner Cc: stable@vger.kernel.org Cc: Jani Nikula Cc: Ville Syrjälä Cc: Daniel Vetter Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7df26d4..2cb472b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -74,6 +74,8 @@ #define EDID_QUIRK_FORCE_8BPC (1 << 8) /* Force 12bpc */ #define EDID_QUIRK_FORCE_12BPC (1 << 9) +/* Force 6bpc */ +#define EDID_QUIRK_FORCE_6BPC (1 << 10) struct detailed_mode_closure { struct drm_connector *connector; @@ -100,6 +102,9 @@ static struct edid_quirk { /* Unknown Acer */ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ + { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -4082,6 +4087,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) drm_add_display_info(edid, &connector->display_info, connector); + if (quirks & EDID_QUIRK_FORCE_6BPC) + connector->display_info.bpc = 6; + if (quirks & EDID_QUIRK_FORCE_8BPC) connector->display_info.bpc = 8; -- cgit v0.10.2 From 196f954e250943df414efd3d632254c29be38e59 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 6 Jul 2016 12:05:45 +0200 Subject: drm/i915/dp: Revert "drm/i915/dp: fall back to 18 bpp when sink capability is unknown" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 013dd9e03872 ("drm/i915/dp: fall back to 18 bpp when sink capability is unknown") This commit introduced a regression into stable kernels, as it reduces output color depth to 6 bpc for any video sink connected to a Displayport connector if that sink doesn't report a specific color depth via EDID, or if our EDID parser doesn't actually recognize the proper bpc from EDID. Affected are active DisplayPort->VGA converters and active DisplayPort->DVI converters. Both should be able to handle 8 bpc, but are degraded to 6 bpc with this patch. The reverted commit was meant to fix Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=105331 A followup patch implements a fix for that specific bug, which is caused by a faulty EDID of the affected DP panel by adding a new EDID quirk for that panel. DP 18 bpp fallback handling and other improvements to DP sink bpc detection will be handled for future kernels in a separate series of patches. Please backport to stable. 
Signed-off-by: Mario Kleiner Acked-by: Jani Nikula Cc: stable@vger.kernel.org Cc: Ville Syrjälä Cc: Daniel Vetter Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index baeb753..dcf93b3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12106,21 +12106,11 @@ connected_sink_compute_bpp(struct intel_connector *connector, pipe_config->pipe_bpp = connector->base.display_info.bpc*3; } - /* Clamp bpp to default limit on screens without EDID 1.4 */ - if (connector->base.display_info.bpc == 0) { - int type = connector->base.connector_type; - int clamp_bpp = 24; - - /* Fall back to 18 bpp when DP sink capability is unknown. */ - if (type == DRM_MODE_CONNECTOR_DisplayPort || - type == DRM_MODE_CONNECTOR_eDP) - clamp_bpp = 18; - - if (bpp > clamp_bpp) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", - bpp, clamp_bpp); - pipe_config->pipe_bpp = clamp_bpp; - } + /* Clamp bpp to 8 on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0 && bpp > 24) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", + bpp); + pipe_config->pipe_bpp = 24; } } -- cgit v0.10.2 From 210a021dab639694600450c14b877bf3e3240adc Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 6 Jul 2016 12:05:48 +0200 Subject: drm/edid: Set 8 bpc color depth for displays with "DFP 1.x compliant TMDS". MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to E-EDID spec 1.3, table 3.9, a digital video sink with the "DFP 1.x compliant TMDS" bit set is "signal compatible with VESA DFP 1.x TMDS CRGB, 1 pixel / clock, up to 8 bits / color MSB aligned". For such displays, the DFP spec 1.0, section 3.10 "EDID support" says: "If the DFP monitor only supports EDID 1.X (1.1, 1.2, etc.) without extensions, the host will make the following assumptions: 1. 24-bit MSB-aligned RGB TFT 2. DE polarity is active high 3. H and V syncs are active high 4. Established CRT timings will be used 5. Dithering will not be enabled on the host" So if we don't know the bit depth of the display from additional colorimetry info we should assume 8 bpc / 24 bpp by default. This patch adds info->bpc = 8 assignement for that case. Signed-off-by: Mario Kleiner Cc: Jani Nikula Cc: Ville Syrjälä Cc: Daniel Vetter Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 2cb472b..637a0aa 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3867,6 +3867,20 @@ static void drm_add_display_info(struct edid *edid, /* HDMI deep color modes supported? Assign to info, if so */ drm_assign_hdmi_deep_color_info(edid, info, connector); + /* + * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3? + * + * For such displays, the DFP spec 1.0, section 3.10 "EDID support" + * tells us to assume 8 bpc color depth if the EDID doesn't have + * extensions which tell otherwise. 
+ */ + if ((info->bpc == 0) && (edid->revision < 4) && + (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) { + info->bpc = 8; + DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n", + connector->name, info->bpc); + } + /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return; -- cgit v0.10.2 From d26e94149276f8c3d4911aa2c8395ba99b613c8d Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Fri, 17 Jun 2016 23:28:34 -0700 Subject: kbuild: no gcc-plugins during cc-option tests The gcc-plugins arguments should not be included when performing cc-option tests. Steps to reproduce: 1) make mrproper 2) make defconfig 3) enable GCC_PLUGINS, GCC_PLUGIN_CYC_COMPLEXITY 4) enable FUNCTION_TRACER (it will select other options as well) 5) make && make modules Build errors: MODPOST 18 modules ERROR: "__fentry__" [net/netfilter/xt_nat.ko] undefined! ERROR: "__fentry__" [net/netfilter/xt_mark.ko] undefined! ERROR: "__fentry__" [net/netfilter/xt_addrtype.ko] undefined! ERROR: "__fentry__" [net/netfilter/xt_LOG.ko] undefined! ERROR: "__fentry__" [net/netfilter/nf_nat_sip.ko] undefined! ERROR: "__fentry__" [net/netfilter/nf_nat_irc.ko] undefined! ERROR: "__fentry__" [net/netfilter/nf_nat_ftp.ko] undefined! ERROR: "__fentry__" [net/netfilter/nf_nat.ko] undefined! Reported-by: Laura Abbott Signed-off-by: Emese Revfy [kees: renamed variable, clarified commit message] Signed-off-by: Kees Cook diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 15b196f..1792198 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -108,16 +108,20 @@ as-option = $(call try-run,\ as-instr = $(call try-run,\ printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) +# Do not attempt to build with gcc plugins during cc-option tests. +# (And this uses delayed resolution so the flags will be up to date.) +CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) + # cc-option # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) cc-option = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) + $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) # cc-option-yn # Usage: flag := $(call cc-option-yn,-march=winchip-c6) cc-option-yn = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) + $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) # cc-option-align # Prefix align with either -falign or -malign @@ -127,7 +131,7 @@ cc-option-align = $(subst -functions=0,,\ # cc-disable-warning # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) cc-disable-warning = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) + $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) # cc-name # Expands to either gcc or clang -- cgit v0.10.2 From ed58c0e9eefef517aa5a547b78658e2ab4422232 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 17 Jun 2016 23:11:12 -0700 Subject: gcc-plugins: abort builds cleanly when not supported When the compiler doesn't support gcc plugins (either due to missing headers or too old a version), report the problem and abort the build instead of emitting a warning and letting the build founder with arcane compiler errors. 
Signed-off-by: Kees Cook diff --git a/Makefile b/Makefile index 70de144..8c504f3 100644 --- a/Makefile +++ b/Makefile @@ -635,13 +635,6 @@ endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) -PHONY += gcc-plugins -gcc-plugins: scripts_basic -ifdef CONFIG_GCC_PLUGINS - $(Q)$(MAKE) $(build)=scripts/gcc-plugins -endif - @: - include scripts/Makefile.gcc-plugins ifdef CONFIG_READABLE_ASM diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index 5e22b60..bbca1f4 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -23,21 +23,37 @@ ifdef CONFIG_GCC_PLUGINS export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN + ifneq ($(PLUGINCC),) + # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication. + GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS)) + endif + + KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) + GCC_PLUGIN := $(gcc-plugin-y) +endif + +# If plugins aren't supported, abort the build before hard-to-read compiler +# errors start getting spewed by the main build. +PHONY += gcc-plugins-check +gcc-plugins-check: FORCE +ifdef CONFIG_GCC_PLUGINS ifeq ($(PLUGINCC),) ifneq ($(GCC_PLUGINS_CFLAGS),) ifeq ($(call cc-ifversion, -ge, 0405, y), y) - PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)") - $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?) + $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true + @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1 else - $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least) + @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1 endif endif - else - # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication. - GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS)) endif +endif + @: - KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) - GCC_PLUGIN := $(gcc-plugin-y) - +# Actually do the build, if requested. +PHONY += gcc-plugins +gcc-plugins: scripts_basic gcc-plugins-check +ifdef CONFIG_GCC_PLUGINS + $(Q)$(MAKE) $(build)=scripts/gcc-plugins endif + @: diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh index fb92075..b65224b 100755 --- a/scripts/gcc-plugin.sh +++ b/scripts/gcc-plugin.sh @@ -1,5 +1,12 @@ #!/bin/sh srctree=$(dirname "$0") + +SHOW_ERROR= +if [ "$1" = "--show-error" ] ; then + SHOW_ERROR=1 + shift || true +fi + gccplugins_dir=$($3 -print-file-name=plugin) plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <&2 + fi exit 1 fi @@ -48,4 +58,8 @@ then echo "$2" exit 0 fi + +if [ -n "$SHOW_ERROR" ] ; then + echo "${plugincc}" >&2 +fi exit 1 -- cgit v0.10.2 From 65d59ec8ad99e9d932ccca4daabb8e3fdafeb11b Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Mon, 20 Jun 2016 20:40:24 +0200 Subject: gcc-plugins: Add support for passing plugin arguments The latent_entropy plugin needs to pass arguments, so this adds the support. 
Signed-off-by: Emese Revfy Signed-off-by: Kees Cook diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index bbca1f4..ffb928b 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -19,7 +19,7 @@ ifdef CONFIG_GCC_PLUGINS endif endif - GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) + GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN -- cgit v0.10.2 From 7040c83bfbaf2340d2f336dc7641ce909c8c2b4c Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Sun, 26 Jun 2016 17:36:43 +0200 Subject: gcc-plugins: Automate make rule generation There's no reason to repeat the same names in the Makefile when the .so files have already been listed. The .o list can be generated from them. Reported-by: PaX Team Signed-off-by: Emese Revfy [kees: clarified commit message] Signed-off-by: Kees Cook diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile index 88c8ec4..ee0a75d 100644 --- a/scripts/gcc-plugins/Makefile +++ b/scripts/gcc-plugins/Makefile @@ -21,7 +21,6 @@ endif $(HOSTLIBS)-y := $(GCC_PLUGIN) always := $($(HOSTLIBS)-y) -cyc_complexity_plugin-objs := cyc_complexity_plugin.o -sancov_plugin-objs := sancov_plugin.o +$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o)) clean-files += *.so -- cgit v0.10.2 From caefd8c9a9fb06811e07bf3571e5d4450846b16a Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Sun, 26 Jun 2016 17:38:20 +0200 Subject: gcc-plugins: Add support for plugin subdirectories This adds support for building more complex gcc plugins that live in a subdirectory instead of just in a single source file. Reported-by: PaX Team Signed-off-by: Emese Revfy [kees: clarified commit message] Signed-off-by: Kees Cook diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index ffb928b..61f0e6d 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -21,7 +21,7 @@ ifdef CONFIG_GCC_PLUGINS GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) - export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN + export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN ifneq ($(PLUGINCC),) # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication. 
@@ -30,6 +30,7 @@ ifdef CONFIG_GCC_PLUGINS KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) GCC_PLUGIN := $(gcc-plugin-y) + GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y) endif # If plugins aren't supported, abort the build before hard-to-read compiler diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile index ee0a75d..8b29dc1 100644 --- a/scripts/gcc-plugins/Makefile +++ b/scripts/gcc-plugins/Makefile @@ -12,15 +12,18 @@ else export HOST_EXTRACXXFLAGS endif -export GCCPLUGINS_DIR HOSTLIBS - ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN)) GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN)) endif -$(HOSTLIBS)-y := $(GCC_PLUGIN) +export HOSTLIBS + +$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p))) always := $($(HOSTLIBS)-y) $(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o)) +subdir-y := $(GCC_PLUGIN_SUBDIR) +subdir- += $(GCC_PLUGIN_SUBDIR) + clean-files += *.so -- cgit v0.10.2 From fa00c437eef8dc2e7b25f8cd868cfa405fcc2bb3 Mon Sep 17 00:00:00 2001 From: Dave Carroll Date: Fri, 5 Aug 2016 13:44:10 -0600 Subject: aacraid: Check size values after double-fetch from user In aacraid's ioctl_send_fib() we do two fetches from userspace, one the get the fib header's size and one for the fib itself. Later we use the size field from the second fetch to further process the fib. If for some reason the size from the second fetch is different than from the first fix, we may encounter an out-of- bounds access in aac_fib_send(). We also check the sender size to insure it is not out of bounds. This was reported in https://bugzilla.kernel.org/show_bug.cgi?id=116751 and was assigned CVE-2016-6480. Reported-by: Pengfei Wang Fixes: 7c00ffa31 '[SCSI] 2.6 aacraid: Variable FIB size (updated patch)' Cc: stable@vger.kernel.org Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index b381b37..5648b71 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) struct fib *fibptr; struct hw_fib * hw_fib = (struct hw_fib *)0; dma_addr_t hw_fib_pa = (dma_addr_t)0LL; - unsigned size; + unsigned int size, osize; int retval; if (dev->in_reset) { @@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) * will not overrun the buffer when we copy the memory. Return * an error if we would. 
*/ - size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); + osize = size = le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize)) size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) { @@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) goto cleanup; } + /* Sanity check the second copy */ + if ((osize != le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr)) + || (size < le16_to_cpu(kfib->header.SenderSize))) { + retval = -EINVAL; + goto cleanup; + } + if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { aac_adapter_interrupt(dev); /* -- cgit v0.10.2 From 36e9d08b58f44c3a02974c405ccaaa6ecfaf05b8 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Tue, 9 Aug 2016 02:16:18 +0200 Subject: drm/cirrus: Fix NULL pointer dereference when registering the fbdev cirrus_modeset_init() is initializing/registering the emulated fbdev and, since commit c61b93fe51b1 ("drm/atomic: Fix remaining places where !funcs->best_encoder is valid"), DRM internals can access/test some of the fields in mode_config->funcs as part of the fbdev registration process. Make sure dev->mode_config.funcs is properly set to avoid dereferencing a NULL pointer. Reported-by: Mike Marshall Reported-by: Eric W. Biederman Signed-off-by: Boris Brezillon Fixes: c61b93fe51b1 ("drm/atomic: Fix remaining places where !funcs->best_encoder is valid") Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 80446e2..76bcb43 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags) goto out; } + /* + * cirrus_modeset_init() is initializing/registering the emulated fbdev + * and DRM internals can access/test some of the fields in + * mode_config->funcs as part of the fbdev registration process. + * Make sure dev->mode_config.funcs is properly set to avoid + * dereferencing a NULL pointer. + * FIXME: mode_config.funcs assignment should probably be done in + * cirrus_modeset_init() (that's a common pattern seen in other DRM + * drivers). + */ + dev->mode_config.funcs = &cirrus_mode_funcs; r = cirrus_modeset_init(cdev); if (r) { dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); goto out; } - dev->mode_config.funcs = (void *)&cirrus_mode_funcs; - return 0; out: cirrus_driver_unload(dev); -- cgit v0.10.2 From c39b487f195b93235ee76384427467786f7bf29f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 9 Aug 2016 00:20:28 -0400 Subject: Revert "drm/amdgpu: work around lack of upstream ACPI support for D3cold" This reverts commit c63695cc5e5f685e924e25a8f9555f6e846f1fc6. Now that d3cold support is upstream, there is no more need for this workaround. bug: https://bugs.freedesktop.org/show_bug.cgi?id=97248 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 49de926..10b5ddf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { printk("ATPX Hybrid Graphics\n"); -#if 1 - /* This is a temporary hack until the D3 cold support - * makes it upstream. 
The ATPX power_control method seems - * to still work on even if the system should be using - * the new standardized hybrid D3 cold ACPI interface. - */ - atpx->functions.power_cntl = true; -#else atpx->functions.power_cntl = false; -#endif atpx->is_hybrid = true; } -- cgit v0.10.2 From b817634276f7f68c9d1d6d4a27117ff3c2f16956 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 9 Aug 2016 00:21:45 -0400 Subject: Revert "drm/radeon: work around lack of upstream ACPI support for D3cold" This reverts commit bdfb76040068d960cb9e226876be8a508d741c4a. Now that d3cold is upstream, there is no more need for this workaround. diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6de3428..ddef0d4 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { printk("ATPX Hybrid Graphics\n"); -#if 1 - /* This is a temporary hack until the D3 cold support - * makes it upstream. The ATPX power_control method seems - * to still work on even if the system should be using - * the new standardized hybrid D3 cold ACPI interface. - */ - atpx->functions.power_cntl = true; -#else atpx->functions.power_cntl = false; -#endif atpx->is_hybrid = true; } -- cgit v0.10.2 From d2cf5be07ff7c7cde8bef8551a30e8e86e2037a7 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 4 Aug 2016 16:38:15 +1000 Subject: crypto: crc32c-vpmsum - Convert to CPU feature based module autoloading This patch utilises the GENERIC_CPU_AUTOPROBE infrastructure to automatically load the crc32c-vpmsum module if the CPU supports it. Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index bfe3d37..9fa046d 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #define CHKSUM_BLOCK_SIZE 1 @@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void) crypto_unregister_shash(&alg); } -module_init(crc32c_vpmsum_mod_init); +module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init); module_exit(crc32c_vpmsum_mod_fini); MODULE_AUTHOR("Anton Blanchard "); -- cgit v0.10.2 From 880a3d6afd068682d6386a0528be1217541d3d8e Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 2 Aug 2016 12:39:43 +1000 Subject: powerpc/xics: Properly set Edge/Level type and enable resend This sets the type of the interrupt appropriately. We set it as follow: - If not mapped from the device-tree, we use edge. This is the case of the virtual interrupts and PCI MSIs for example. - If mapped from the device-tree and #interrupt-cells is 2 (PAPR compliant), we use the second cell to set the appropriate type - If mapped from the device-tree and #interrupt-cells is 1 (current OPAL on P8 does that), we assume level sensitive since those are typically going to be the PSI LSIs which are level sensitive. Additionally, we mark the interrupts requested via the opal_interrupts property all level. This is a bit fishy but the best we can do until we fix OPAL to properly expose them with a complete descriptor. It is also correct for the current HW anyway as OPAL interrupts are currently PCI error and PSI interrupts which are level. 
Finally now that edge interrupts are properly identified, we can enable CONFIG_HARDIRQS_SW_RESEND which will make the core re-send them if they occur while masked, which some drivers rely upon. This fixes issues with lost interrupts on some Mellanox adapters. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index f5f729c..f0b2385 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void); extern void xics_kexec_teardown_cpu(int secondary); extern void xics_migrate_irqs_away(void); extern void icp_native_eoi(struct irq_data *d); +extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type); +extern int xics_retrigger(struct irq_data *data); #ifdef CONFIG_SMP extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, unsigned int strict_check); diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index e505223b..ed8bba6 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -228,7 +228,8 @@ int __init opal_event_init(void) } /* Install interrupt handler */ - rc = request_irq(virq, opal_interrupt, 0, "opal", NULL); + rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW, + "opal", NULL); if (rc) { irq_dispose_mapping(virq); pr_warn("Error %d requesting irq %d (0x%x)\n", diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig index 0031eda..385e7aa 100644 --- a/arch/powerpc/sysdev/xics/Kconfig +++ b/arch/powerpc/sysdev/xics/Kconfig @@ -1,6 +1,7 @@ config PPC_XICS def_bool n select PPC_SMP_MUXED_IPI + select HARDIRQS_SW_RESEND config PPC_ICP_NATIVE def_bool n diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 27c936c..1c6bf4b 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c @@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = { .irq_mask = ics_opal_mask_irq, .irq_unmask = ics_opal_unmask_irq, .irq_eoi = NULL, /* Patched at init time */ - .irq_set_affinity = ics_opal_set_affinity + .irq_set_affinity = ics_opal_set_affinity, + .irq_set_type = xics_set_irq_type, + .irq_retrigger = xics_retrigger, }; static int ics_opal_map(struct ics *ics, unsigned int virq); diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 3854dd4..78ee5c7 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = { .irq_mask = ics_rtas_mask_irq, .irq_unmask = ics_rtas_unmask_irq, .irq_eoi = NULL, /* Patched at init time */ - .irq_set_affinity = ics_rtas_set_affinity + .irq_set_affinity = ics_rtas_set_affinity, + .irq_set_type = xics_set_irq_type, + .irq_retrigger = xics_retrigger, }; static int ics_rtas_map(struct ics *ics, unsigned int virq) diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index a795a5f..9d530f4 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq, pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); - /* They aren't all level sensitive but we just don't really know */ - irq_set_status_flags(virq, IRQ_LEVEL); + /* + * Mark interrupts as edge sensitive by default 
so that resend + * actually works. The device-tree parsing will turn the LSIs + * back to level. + */ + irq_clear_status_flags(virq, IRQ_LEVEL); /* Don't call into ICS for IPIs */ if (hw == XICS_IPI) { @@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { - /* Current xics implementation translates everything - * to level. It is not technically right for MSIs but this - * is irrelevant at this point. We might get smarter in the future - */ *out_hwirq = intspec[0]; - *out_flags = IRQ_TYPE_LEVEL_LOW; + /* + * If intsize is at least 2, we look for the type in the second cell, + * we assume the LSB indicates a level interrupt. + */ + if (intsize > 1) { + if (intspec[1] & 1) + *out_flags = IRQ_TYPE_LEVEL_LOW; + else + *out_flags = IRQ_TYPE_EDGE_RISING; + } else + *out_flags = IRQ_TYPE_LEVEL_LOW; + + return 0; +} + +int xics_set_irq_type(struct irq_data *d, unsigned int flow_type) +{ + /* + * We only support these. This has really no effect other than setting + * the corresponding descriptor bits mind you but those will in turn + * affect the resend function when re-enabling an edge interrupt. + * + * Set set the default to edge as explained in map(). + */ + if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) + flow_type = IRQ_TYPE_EDGE_RISING; + + if (flow_type != IRQ_TYPE_EDGE_RISING && + flow_type != IRQ_TYPE_LEVEL_LOW) + return -EINVAL; + + irqd_set_trigger_type(d, flow_type); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +int xics_retrigger(struct irq_data *data) +{ + /* + * We need to push a dummy CPPR when retriggering, since the subsequent + * EOI will try to pop it. Passing 0 works, as the function hard codes + * the priority value anyway. + */ + xics_push_cppr(0); + + /* Tell the core to do a soft retrigger */ return 0; } -- cgit v0.10.2 From 54a94fcf2f00c9deb9491d5ab2b5a4022332f47e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 4 Aug 2016 08:37:03 +0300 Subject: powerpc/cell: Add missing error code in spufs_mkgang() We should return -ENOMEM if alloc_spu_gang() fails. Signed-off-by: Dan Carpenter Acked-by: Arnd Bergmann Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 5be15cf..2975754 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) gang = alloc_spu_gang(); SPUFS_I(inode)->i_ctx = NULL; SPUFS_I(inode)->i_gang = gang; - if (!gang) + if (!gang) { + ret = -ENOMEM; goto out_iput; + } inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; -- cgit v0.10.2 From 4d9021957b5218310e28767f25551ca8f5eac797 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 3 Aug 2016 18:40:45 +1000 Subject: powerpc/powernv/ioda: Fix TCE invalidate to work in real mode again Commit fd141d1a99a3 ("powerpc/powernv/pci: Rework accessing the TCE invalidate register") broke TCE invalidation on IODA2/PHB3 for real mode. This makes invalidate work again. 
Fixes: fd141d1a99a3 ("powerpc/powernv/pci: Rework accessing the TCE invalidate register") Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6b95283..dedbbe9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1877,7 +1877,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, unsigned shift, unsigned long index, unsigned long npages) { - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); + __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); unsigned long start, end, inc; /* We'll invalidate DMA address in PE scope */ -- cgit v0.10.2 From e325d76f8bd2d222a1f577aba00dfb43cece4cbc Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Fri, 5 Aug 2016 19:13:12 +0530 Subject: powerpc/powernv: Load correct TOC pointer while waking up from winkle. The function pnv_restore_hyp_resource() loads the TOC into r2 from the invalid PACA pointer before fixing r13 value. This do not affect POWER ISA 3.0 but it does have an impact on POWER ISA 2.07 or less leading CPU to get stuck forever. login: [ 471.830433] Processor 120 is stuck. This can be easily reproducible using following steps: - Turn off SMT $ ppc64_cpu --smt=off - offline/online any online cpu (Thread 0 of any core which is online) $ echo 0 > /sys/devices/system/cpu/cpu/online $ echo 1 > /sys/devices/system/cpu/cpu/online For POWER ISA 2.07 or less, the last bit of HSPRG0 is set indicating that thread is waking up from winkle. Hence, the last bit of HSPRG0(r13) needs to be clear before accessing it as PACA to avoid loading invalid values from invalid PACA pointer. Fix this by loading TOC after r13 register is corrected. Fixes: bcef83a00dc4 ("powerpc/powernv: Add platform support for stop instruction") Signed-off-by: Mahesh Salgaonkar Acked-by: Vaidyanathan Srinivasan Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index ba79d15..07a330e 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -363,8 +363,8 @@ _GLOBAL(power9_idle_stop) * cr3 - set to gt if waking up with partial/complete hypervisor state loss */ _GLOBAL(pnv_restore_hyp_resource) - ld r2,PACATOC(r13); BEGIN_FTR_SECTION + ld r2,PACATOC(r13); /* * POWER ISA 3. Use PSSCR to determine if we * are waking up from deep idle state @@ -395,6 +395,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) */ clrldi r5,r13,63 clrrdi r13,r13,1 + + /* Now that we are sure r13 is corrected, load TOC */ + ld r2,PACATOC(r13); cmpwi cr4,r5,1 mtspr SPRN_HSPRG0,r13 -- cgit v0.10.2 From 98d8821a47f3fd7354d3ab87adb50b10c9adb937 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Fri, 5 Aug 2016 17:34:04 +0530 Subject: powerpc/powernv: Move IDLE_STATE_ENTER_SEQ macro to cpuidle.h Move IDLE_STATE_ENTER_SEQ macro to cpuidle.h so that MCE handler changes in subsequent patch can use it. No functionality change. 
Signed-off-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index 3d7fc06..01b8a13 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ b/arch/powerpc/include/asm/cpuidle.h @@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state; #endif +/* Idle state entry routines */ +#ifdef CONFIG_PPC_P7_NAP +#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ + /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ + std r0,0(r1); \ + ptesync; \ + ld r0,0(r1); \ +1: cmp cr0,r0,r0; \ + bne 1b; \ + IDLE_INST; \ + b . +#endif /* CONFIG_PPC_P7_NAP */ + #endif diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 07a330e..2265c63 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -44,18 +44,6 @@ PSSCR_PSLL_MASK | PSSCR_TR_MASK | \ PSSCR_MTL_MASK -/* Idle state entry routines */ - -#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ - /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ - std r0,0(r1); \ - ptesync; \ - ld r0,0(r1); \ -1: cmp cr0,r0,r0; \ - bne 1b; \ - IDLE_INST; \ - b . - .text /* -- cgit v0.10.2 From c0c45a6bd7d054efd80c1033bf4285830c72835b Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Mon, 8 Aug 2016 21:57:40 -0400 Subject: qed: Remove the endian-ness conversion for pri_to_tc value. Endian-ness conversion is not needed for priority-to-TC field as the field is already being read/written by the driver in big-endian way. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d0dc28f..6869330 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -483,7 +483,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); - pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); + pri_map = p_ets->pri_tc_tbl[0]; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; @@ -944,7 +944,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); p_ets->pri_tc_tbl[0] |= val; } - p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]); for (i = 0; i < 2; i++) { p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); -- cgit v0.10.2 From fb9ea8a9b70c79f38d2758c25d3acff4a2cd5bfb Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Mon, 8 Aug 2016 21:57:41 -0400 Subject: qed: Use ieee mfw-mask to get ethtype in ieee-dcbx mode. Ethtype value is being read incorrectly in ieee-dcbx mode. Use the correct mfw mask value. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6869330..f07f0ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -52,16 +52,33 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) DCBX_APP_SF_ETHTYPE); } +static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); +} + static bool qed_dcbx_app_port(u32 app_info_bitmap) { return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == DCBX_APP_SF_PORT); } -static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_DEFAULT); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); } static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) @@ -70,16 +87,28 @@ static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) proto_id == QED_TCP_PORT_ISCSI); } -static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_FCOE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); } -static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_ROCE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); } static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) @@ -164,15 +193,15 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, static bool qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, u32 app_prio_bitmap, - u16 id, enum dcbx_protocol_type *type) + u16 id, enum dcbx_protocol_type *type, bool ieee) { - if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { + if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_FCOE; - } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE; } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { *type = DCBX_PROTOCOL_ISCSI; - } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ETH; } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { *type = DCBX_PROTOCOL_ROCE_V2; @@ -194,17 +223,18 @@ static int qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, - u32 pri_tc_tbl, int count, bool dcbx_enabled) + u32 pri_tc_tbl, int count, u8 dcbx_version) { u8 tc, priority_map; enum dcbx_protocol_type type; + bool enable, ieee; u16 protocol_id; int priority; - 
bool enable; int i; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -219,7 +249,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, - protocol_id, &type)) { + protocol_id, &type, ieee)) { /* ETH always have the enable bit reset, as it gets * vlan information per packet. For other protocols, * should be set according to the dcbx_enabled @@ -275,15 +305,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) struct dcbx_ets_feature *p_ets; struct qed_hw_info *p_info; u32 pri_tc_tbl, flags; - bool dcbx_enabled; + u8 dcbx_version; int num_entries; int rc = 0; - /* If DCBx version is non zero, then negotiation was - * successfuly performed - */ flags = p_hwfn->p_dcbx_info->operational.flags; - dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); p_app = &p_hwfn->p_dcbx_info->operational.features.app; p_tbl = p_app->app_pri_tbl; @@ -295,13 +322,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, - num_entries, dcbx_enabled); + num_entries, dcbx_version); if (rc) return rc; p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); data.pf_id = p_hwfn->rel_pf_id; - data.dcbx_enabled = dcbx_enabled; + data.dcbx_enabled = !!dcbx_version; qed_dcbx_dp_protocol(p_hwfn, &data); @@ -400,7 +427,7 @@ static void qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct dcbx_app_priority_entry *p_tbl, - struct qed_dcbx_params *p_params) + struct qed_dcbx_params *p_params, bool ieee) { struct qed_app_entry *entry; u8 pri_map; @@ -422,7 +449,7 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, DCBX_APP_PROTOCOL_ID); qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, entry->proto_id, - &entry->proto_type); + &entry->proto_type, ieee); } DP_VERBOSE(p_hwfn, QED_MSG_DCB, @@ -500,9 +527,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct dcbx_app_priority_entry *p_tbl, struct dcbx_ets_feature *p_ets, - u32 pfc, struct qed_dcbx_params *p_params) + u32 pfc, struct qed_dcbx_params *p_params, bool ieee) { - qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); + qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); } @@ -516,7 +543,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, p_feat = &p_hwfn->p_dcbx_info->local_admin.features; qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, - p_feat->pfc, ¶ms->local.params); + p_feat->pfc, ¶ms->local.params, false); params->local.valid = true; } @@ -529,7 +556,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, p_feat = &p_hwfn->p_dcbx_info->remote.features; qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, - p_feat->pfc, ¶ms->remote.params); + p_feat->pfc, ¶ms->remote.params, false); params->remote.valid = true; } @@ -574,7 +601,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, - p_feat->pfc, 
¶ms->operational.params); + p_feat->pfc, ¶ms->operational.params, + p_operational->ieee); qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); p_operational->err = err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 5927840..6f9d3b8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry { #define DCBX_APP_SF_SHIFT 8 #define DCBX_APP_SF_ETHTYPE 0 #define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 #define DCBX_APP_PROTOCOL_ID_SHIFT 16 }; -- cgit v0.10.2 From 59bcb7972fc5d53a621ee6b2c3cf1654cebb3dc5 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Mon, 8 Aug 2016 21:57:42 -0400 Subject: qed: Add dcbx app support for IEEE Selection Field. MFW now supports the Selection field for IEEE mode. Add driver changes to use the newer MFW masks to read/write the port-id value. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f07f0ac..b157a6a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -69,6 +69,17 @@ static bool qed_dcbx_app_port(u32 app_info_bitmap) DCBX_APP_SF_PORT); } +static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_port(app_info_bitmap); + + return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); +} + static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { bool ethtype; @@ -81,10 +92,17 @@ static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); } -static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_TCP_PORT_ISCSI); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_TCP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); } static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) @@ -111,10 +129,17 @@ static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); } -static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_UDP_PORT_TYPE_ROCE_V2); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_UDP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); } static void @@ -199,11 +224,11 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, *type 
= DCBX_PROTOCOL_FCOE; } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE; - } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ISCSI; } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ETH; - } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; @@ -441,8 +466,39 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, DCBX_APP_NUM_ENTRIES); for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { entry = &p_params->app_entry[i]; - entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, - DCBX_APP_SF)); + if (ieee) { + u8 sf_ieee; + u32 val; + + sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF_IEEE); + switch (sf_ieee) { + case DCBX_APP_SF_IEEE_RESERVED: + /* Old MFW */ + val = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF); + entry->sf_ieee = val ? + QED_DCBX_SF_IEEE_TCP_UDP_PORT : + QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_ETHTYPE: + entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_TCP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case DCBX_APP_SF_IEEE_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case DCBX_APP_SF_IEEE_TCP_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + } + } else { + entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF)); + } + pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); entry->prio = ffs(pri_map) - 1; entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -981,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, static void qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, - struct qed_dcbx_params *p_params) + struct qed_dcbx_params *p_params, bool ieee) { u32 *entry; int i; @@ -1002,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { entry = &p_app->app_pri_tbl[i].entry; - *entry &= ~DCBX_APP_SF_MASK; - if (p_params->app_entry[i].ethtype) - *entry |= ((u32)DCBX_APP_SF_ETHTYPE << - DCBX_APP_SF_SHIFT); - else - *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); + if (ieee) { + *entry &= ~DCBX_APP_SF_IEEE_MASK; + switch (p_params->app_entry[i].sf_ieee) { + case QED_DCBX_SF_IEEE_ETHTYPE: + *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << + DCBX_APP_SF_IEEE_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + break; + case QED_DCBX_SF_IEEE_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + break; + } + } else { + *entry &= ~DCBX_APP_SF_MASK; + if (p_params->app_entry[i].ethtype) + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_SHIFT); + else + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + } + *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; *entry |= ((u32)p_params->app_entry[i].proto_id << DCBX_APP_PROTOCOL_ID_SHIFT); @@ -1022,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, struct dcbx_local_params *local_admin, struct qed_dcbx_set *params) { + bool ieee = false; + local_admin->flags = 0; memcpy(&local_admin->features, &p_hwfn->p_dcbx_info->operational.features, sizeof(local_admin->features)); - if 
(params->enabled) + if (params->enabled) { local_admin->config = params->ver_num; - else + ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); + } else { local_admin->config = DCBX_CONFIG_VERSION_DISABLED; + } if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, @@ -1042,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, - ¶ms->config.params); + ¶ms->config.params, ieee); } int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b1e3c57..d6c4177 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params { u8 max_tc; }; +enum qed_dcbx_sf_ieee_type { + QED_DCBX_SF_IEEE_ETHTYPE, + QED_DCBX_SF_IEEE_TCP_PORT, + QED_DCBX_SF_IEEE_UDP_PORT, + QED_DCBX_SF_IEEE_TCP_UDP_PORT +}; + struct qed_app_entry { bool ethtype; + enum qed_dcbx_sf_ieee_type sf_ieee; bool enabled; u8 prio; u16 proto_id; -- cgit v0.10.2 From 1d7406ce7bdfc48cd7390f793d23ef81fff75880 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Mon, 8 Aug 2016 21:57:43 -0400 Subject: qed: Update app count when adding a new dcbx app entry to the table. App count is not updated while adding new app entry to the dcbx app table. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index b157a6a..226cb08 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1707,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev, if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) break; /* First empty slot */ - if (!entry->proto_id) + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; break; + } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { @@ -2228,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) (entry->proto_id == app->protocol)) break; /* First empty slot */ - if (!entry->proto_id) + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; break; + } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { -- cgit v0.10.2 From bc14c49195e49b3231c01e4c44e3e5456c940b94 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Fri, 5 Aug 2016 17:34:13 +0530 Subject: powerpc/powernv: Fix MCE handler to avoid trashing CR0/CR1 registers. The current implementation of MCE early handling modifies CR0/1 registers without saving its old values. Fix this by moving early check for powersaving mode to machine_check_handle_early(). The power architecture 2.06 or later allows the possibility of getting machine check while in nap/sleep/winkle. The last bit of HSPRG0 is set to 1, if thread is woken up from winkle. Hence, clear the last bit of HSPRG0 (r13) before MCE handler starts using it as paca pointer. Also, the current code always puts the thread into nap state irrespective of whatever idle state it woke up from. Fix that by looking at paca->thread_idle_state and put the thread back into same state where it came from. Fixes: 1c51089f777b ("powerpc/book3s: Return from interrupt if coming from evil context.") Reported-by: Paul Mackerras Signed-off-by: Mahesh Salgaonkar Reviewed-by: Shreyas B. 
Prabhu Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 41091fd..df6d45e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -144,29 +144,14 @@ machine_check_pSeries_1: * vector */ SET_SCRATCH0(r13) /* save r13 */ -#ifdef CONFIG_PPC_P7_NAP -BEGIN_FTR_SECTION - /* Running native on arch 2.06 or later, check if we are - * waking up from nap. We only handle no state loss and - * supervisor state loss. We do -not- handle hypervisor - * state loss at this time. + /* + * Running native on arch 2.06 or later, we may wakeup from winkle + * inside machine check. If yes, then last bit of HSPGR0 would be set + * to 1. Hence clear it unconditionally. */ - mfspr r13,SPRN_SRR1 - rlwinm. r13,r13,47-31,30,31 - OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) - beq 9f - - mfspr r13,SPRN_SRR1 - rlwinm. r13,r13,47-31,30,31 - /* waking up from powersave (nap) state */ - cmpwi cr1,r13,2 - /* Total loss of HV state is fatal. let's just stay stuck here */ - OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) - bgt cr1,. -9: - OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) -#endif /* CONFIG_PPC_P7_NAP */ + GET_PACA(r13) + clrrdi r13,r13,1 + SET_PACA(r13) EXCEPTION_PROLOG_0(PACA_EXMC) BEGIN_FTR_SECTION b machine_check_powernv_early @@ -1273,25 +1258,51 @@ machine_check_handle_early: * Check if thread was in power saving mode. We come here when any * of the following is true: * a. thread wasn't in power saving mode - * b. thread was in power saving mode with no state loss or - * supervisor state loss + * b. thread was in power saving mode with no state loss, + * supervisor state loss or hypervisor state loss. * - * Go back to nap again if (b) is true. + * Go back to nap/sleep/winkle mode again if (b) is true. */ rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ beq 4f /* No, it wasn;t */ /* Thread was in power saving mode. Go back to nap again. */ cmpwi r11,2 - bne 3f - /* Supervisor state loss */ + blt 3f + /* Supervisor/Hypervisor state loss */ li r0,1 stb r0,PACA_NAPSTATELOST(r13) 3: bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP GET_PACA(r13) ld r1,PACAR1(r13) - li r3,PNV_THREAD_NAP - b pnv_enter_arch207_idle_mode + /* + * Check what idle state this CPU was in and go back to same mode + * again. + */ + lbz r3,PACA_THREAD_IDLE_STATE(r13) + cmpwi r3,PNV_THREAD_NAP + bgt 10f + IDLE_STATE_ENTER_SEQ(PPC_NAP) + /* No return */ +10: + cmpwi r3,PNV_THREAD_SLEEP + bgt 2f + IDLE_STATE_ENTER_SEQ(PPC_SLEEP) + /* No return */ + +2: + /* + * Go back to winkle. Please note that this thread was woken up in + * machine check from winkle and have not restored the per-subcore + * state. Hence before going back to winkle, set last bit of HSPGR0 + * to 1. This will make sure that if this thread gets woken up + * again at reset vector 0x100 then it will get chance to restore + * the subcore state. 
+ */ + ori r13,r13,1 + SET_PACA(r13) + IDLE_STATE_ENTER_SEQ(PPC_WINKLE) + /* No return */ 4: #endif /* -- cgit v0.10.2 From 9dc512819e4b723856773f5a018865c20441072f Mon Sep 17 00:00:00 2001 From: Alastair D'Silva Date: Mon, 1 Aug 2016 16:32:51 +1000 Subject: powerpc: Fix unused function warning 'lmb_to_memblock' This patch fixes the following warning: arch/powerpc/platforms/pseries/hotplug-memory.c:323:29: error: 'lmb_to_memblock' defined but not used [-Werror=unused-function] static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) ^~~~~~~~~~~~~~~ The only consumer of this function is 'dlpar_remove_lmb', which is enabled with CONFIG_MEMORY_HOTREMOVE, so move it into the same ifdef block. Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 43f7beb..76ec104 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb) return dlpar_update_device_tree_lmb(lmb); } -static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) -{ - unsigned long section_nr; - struct mem_section *mem_sect; - struct memory_block *mem_block; - - section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); - mem_sect = __nr_to_section(section_nr); - - mem_block = find_memory_block(mem_sect); - return mem_block; -} - #ifdef CONFIG_MEMORY_HOTREMOVE static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { @@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb) static int dlpar_add_lmb(struct of_drconf_cell *); +static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) +{ + unsigned long section_nr; + struct mem_section *mem_sect; + struct memory_block *mem_block; + + section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); + mem_sect = __nr_to_section(section_nr); + + mem_block = find_memory_block(mem_sect); + return mem_block; +} + static int dlpar_remove_lmb(struct of_drconf_cell *lmb) { struct memory_block *mem_block; -- cgit v0.10.2 From 546c4402c57497a6ffdf5373aff75bc89f0866bc Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 6 Aug 2016 15:54:12 -0700 Subject: powerpc/vdso: Add missing include file Some powerpc builds fail with the following buld error. In file included from ./arch/powerpc/include/asm/mmu_context.h:11:0, from arch/powerpc/kernel/vdso.c:28: arch/powerpc/include/asm/cputhreads.h: In function 'get_tensr': arch/powerpc/include/asm/cputhreads.h:101:2: error: implicit declaration of function 'cpu_has_feature' Fixes: b92a226e5284 ("powerpc: Move cpu_has_feature() to a separate file") Signed-off-by: Guenter Roeck Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 6767605..4111d30 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include -- cgit v0.10.2 From cbd74e1bc8129efb9908f130a8a6e60fd95d2106 Mon Sep 17 00:00:00 2001 From: Philippe Bergheaud Date: Fri, 5 Aug 2016 14:02:00 +0200 Subject: cxl: Use fixed width predefined types in data structure. This patch fixes a regression introduced by commit b810253bd934 ("cxl: Add mechanism for delivering AFU driver specific events"). 
It changes the type u8 to __u8 in the uapi header cxl.h, because the former is a kernel internal type, and may not be defined in userland build environments, in particular when cross-compiling libcxl on x86_64 linux machines (RHEL6.7 and Ubuntu 16.04). This patch also changes the size of the field data_size, and makes it constant, to support 32-bit userland applications running on big-endian ppc64 kernels transparently. mpe: This is an ABI change, however the ABI was only added during the 4.8 merge window so has never been part of a released kernel - therefore we give ourselves permission to change it. Fixes: b810253bd934 ("cxl: Add mechanism for delivering AFU driver specific events") Signed-off-by: Philippe Bergheaud Reviewed-by: Frederic Barrat Signed-off-by: Michael Ellerman diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h index cbae529..180d526 100644 --- a/include/uapi/misc/cxl.h +++ b/include/uapi/misc/cxl.h @@ -136,8 +136,8 @@ struct cxl_event_afu_driver_reserved { * * Of course the contents will be ABI, but that's up the AFU driver. */ - size_t data_size; - u8 data[]; + __u32 data_size; + __u8 data[]; }; struct cxl_event { -- cgit v0.10.2 From 164793379ad3b7ef5fc5a28260c111358892dff3 Mon Sep 17 00:00:00 2001 From: Andrew Donnellan Date: Thu, 28 Jul 2016 15:39:41 +1000 Subject: cxl: Fix NULL dereference in cxl_context_init() on PowerVM guests Commit f67a6722d650 ("cxl: Workaround PE=0 hardware limitation in Mellanox CX4") added a "min_pe" field to struct cxl_service_layer_ops, to allow us to work around a Mellanox CX-4 hardware limitation. When allocating the PE number in cxl_context_init(), we read from ctx->afu->adapter->native->sl_ops->min_pe to get the minimum PE number. Unsurprisingly, in a PowerVM guest ctx->afu->adapter->native is NULL, and guests don't have a cxl_service_layer_ops struct anywhere. Move min_pe from struct cxl_service_layer_ops to struct cxl so it's accessible in both native and PowerVM environments. For the Mellanox CX-4, set the min_pe value in set_sl_ops(). 
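(For illustration only, not part of the patch: a minimal userspace program showing why kernel-internal types cannot live in a uapi header. "u8" is not defined outside the kernel at all, and size_t changes width between 32-bit and 64-bit builds, so the structure layout would stop matching the kernel's.)

/* Illustration only, assuming a hosted userspace build; not from the patch.
 * "u8" would not even compile in userspace, so plain unsigned char stands
 * in for it here; the size_t member is what makes the layout differ between
 * 32-bit and 64-bit builds of the same header.
 */
#include <stdio.h>
#include <linux/types.h>	/* __u8 and __u32 are exported to userspace */

struct old_layout { size_t data_size; unsigned char data[]; };
struct new_layout { __u32  data_size; __u8          data[]; };

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct old_layout), sizeof(struct new_layout));
	return 0;	/* old: 8 or 4 depending on the build, new: always 4 */
}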
Fixes: f67a6722d650 ("cxl: Workaround PE=0 hardware limitation in Mellanox CX4") Reported-by: Frederic Barrat Signed-off-by: Andrew Donnellan Acked-by: Ian Munsie Reviewed-by: Frederic Barrat Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index bdee9a0..c466ee2 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, */ mutex_lock(&afu->contexts_lock); idr_preload(GFP_KERNEL); - i = idr_alloc(&ctx->afu->contexts_idr, ctx, - ctx->afu->adapter->native->sl_ops->min_pe, + i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe, ctx->afu->num_procs, GFP_NOWAIT); idr_preload_end(); mutex_unlock(&afu->contexts_lock); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index de09053..344a0ff 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -561,7 +561,6 @@ struct cxl_service_layer_ops { u64 (*timebase_read)(struct cxl *adapter); int capi_mode; bool needs_reset_before_disable; - int min_pe; }; struct cxl_native { @@ -603,6 +602,7 @@ struct cxl { struct bin_attribute cxl_attr; int adapter_num; int user_irqs; + int min_pe; u64 ps_size; u16 psl_rev; u16 base_image; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index d152e2d..1d0347c3 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1521,14 +1521,15 @@ static const struct cxl_service_layer_ops xsl_ops = { .write_timebase_ctrl = write_timebase_ctrl_xsl, .timebase_read = timebase_read_xsl, .capi_mode = OPAL_PHB_CAPI_MODE_DMA, - .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ }; static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) { if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { + /* Mellanox CX-4 */ dev_info(&adapter->dev, "Device uses an XSL\n"); adapter->native->sl_ops = &xsl_ops; + adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */ } else { dev_info(&adapter->dev, "Device uses a PSL\n"); adapter->native->sl_ops = &psl_ops; -- cgit v0.10.2 From 6fd40f192a9dba391b2d84882f1ed3169c52b714 Mon Sep 17 00:00:00 2001 From: Andrew Donnellan Date: Fri, 22 Jul 2016 19:01:36 +1000 Subject: cxl: Fix sparse warnings Make native_irq_wait() static and use NULL rather than 0 to initialise phb->cfg_data in cxl_pci_vphb_add() to remove sparse warnings. Signed-off-by: Andrew Donnellan Reviewed-by: Matthew R. Ochs Acked-by: Ian Munsie Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 3bcdaee..e606fdc 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data) return fail_psl_irq(afu, &irq_info); } -void native_irq_wait(struct cxl_context *ctx) +static void native_irq_wait(struct cxl_context *ctx) { u64 dsisr; int timeout = 1000; diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index dee8def..7ada5f1 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu) /* Setup the PHB using arch provided callback */ phb->ops = &cxl_pcie_pci_ops; phb->cfg_addr = NULL; - phb->cfg_data = 0; + phb->cfg_data = NULL; phb->private_data = afu; phb->controller_ops = cxl_pci_controller_ops; -- cgit v0.10.2 From 10560b9afc8abf349843dff88c45dd43223e803e Mon Sep 17 00:00:00 2001 From: "Guilherme G. 
Piccoli" Date: Fri, 22 Jul 2016 14:05:29 -0300 Subject: powerpc/eeh: Switch to conventional PCI address output in EEH log This is a very minor/trivial fix for the output of PCI address on EEH logs. The PCI address on "OF node" field currently is using ":" as a separator for the function, but the usual separator is ".". This patch changes the separator to dot, so the PCI address is printed as usual. Signed-off-by: Guilherme G. Piccoli Reviewed-by: Gavin Shan Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index c9bc78e..7429556 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) int n = 0, l = 0; char buffer[128]; - n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n", + n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", edev->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); - pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n", + pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n", edev->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); -- cgit v0.10.2 From 61e8a0d5a0270b91581f6c715036844b2ea98da1 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 5 Aug 2016 16:40:56 +1000 Subject: powerpc/pci: Fix endian bug in fixed PHB numbering The recent commit 63a72284b159 ("powerpc/pci: Assign fixed PHB number based on device-tree properties"), added code to read a 64-bit property from the device tree, and if not found read a 32-bit property (reg). There was a bug in the 32-bit case, on big endian machines, due to the use of the 64-bit value to read the 32-bit property. The cast of &prop means we end up writing to the high 32-bit of prop, leaving the low 32-bits containing whatever junk was on the stack. If that junk value was non-zero, and < MAX_PHBS, we would end up using it as the PHB id. This results in users seeing what appear to be random PHB ids. Fix it by reading into a u32 property and then assigning that to the u64 value, letting the CPU do the correct conversions for us. Fixes: 63a72284b159 ("powerpc/pci: Assign fixed PHB number based on device-tree properties") Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a5c0153..7fdf324 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops); static int get_phb_number(struct device_node *dn) { int ret, phb_id = -1; + u32 prop_32; u64 prop; /* @@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn) * reading "ibm,opal-phbid", only present in OPAL environment. */ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); - if (ret) - ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop); + if (ret) { + ret = of_property_read_u32_index(dn, "reg", 1, &prop_32); + prop = prop_32; + } if (!ret) phb_id = (int)(prop & (MAX_PHBS - 1)); -- cgit v0.10.2 From 6dae61627d1004895bfcee81d24482ec64cbecc5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 13 Jul 2016 08:36:03 +0100 Subject: drm/amdkfd: print doorbell offset as a hex value The doorbell offset is formatted with a 0x prefix to suggest it is a hexadecimal value, when in fact %d is being used and this is confusing. Use %X instead to match the proceeding 0x prefix. 
Signed-off-by: Colin Ian King Signed-off-by: Oded Gabbay diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index e621eba..a7d3cb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, sizeof(u32)) + inx; pr_debug("kfd: get kernel queue doorbell\n" - " doorbell offset == 0x%08d\n" + " doorbell offset == 0x%08X\n" " kernel address == 0x%08lX\n", *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); -- cgit v0.10.2 From f3b0946d629c8bfbd3e5f038e30cb9c711a35f10 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Jul 2016 17:18:33 +0100 Subject: genirq/msi: Make sure PCI MSIs are activated early Bharat Kumar Gogada reported issues with the generic MSI code, where the end-point ended up with garbage in its MSI configuration (both for the vector and the message). It turns out that the two MSI paths in the kernel are doing slightly different things: generic MSI: disable MSI -> allocate MSI -> enable MSI -> setup EP PCI MSI: disable MSI -> allocate MSI -> setup EP -> enable MSI And it turns out that end-points are allowed to latch the content of the MSI configuration registers as soon as MSIs are enabled. In Bharat's case, the end-point ends up using whatever was there already, which is not what you want. In order to make things converge, we introduce a new MSI domain flag (MSI_FLAG_ACTIVATE_EARLY) that is unconditionally set for PCI/MSI. When set, this flag forces the programming of the end-point as soon as the MSIs are allocated. A consequence of this is that we have an extra activate in irq_startup, but that should be without much consequence. tglx: - Several people reported a VMWare regression with PCI/MSI-X passthrough. It turns out that the patch also cures that issue. - We need to have a look at the MSI disable interrupt path, where we write the msg to all zeros without disabling MSI in the PCI device. Is that correct? 
Fixes: 52f518a3a7c2 "x86/MSI: Use hierarchical irqdomains to manage MSI interrupts" Reported-and-tested-by: Bharat Kumar Gogada Reported-and-tested-by: Foster Snowhill Reported-by: Matthias Prager Reported-by: Jason Taylor Signed-off-by: Marc Zyngier Acked-by: Bjorn Helgaas Cc: linux-pci@vger.kernel.org Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1468426713-31431-1-git-send-email-marc.zyngier@arm.com Signed-off-by: Thomas Gleixner diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index a02981e..eafa613 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1411,6 +1411,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) pci_msi_domain_update_chip_ops(info); + info->flags |= MSI_FLAG_ACTIVATE_EARLY; + domain = msi_create_irq_domain(fwnode, info, parent); if (!domain) return NULL; diff --git a/include/linux/msi.h b/include/linux/msi.h index 4f0bfe5..e8c81fb 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -270,6 +270,8 @@ enum { MSI_FLAG_MULTI_PCI_MSI = (1 << 2), /* Support PCI MSIX interrupts */ MSI_FLAG_PCI_MSIX = (1 << 3), + /* Needs early activate, required for PCI */ + MSI_FLAG_ACTIVATE_EARLY = (1 << 4), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 5499935..19e9dfb 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -359,6 +359,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, else dev_dbg(dev, "irq [%d-%d] for MSI\n", virq, virq + desc->nvec_used - 1); + /* + * This flag is set by the PCI layer as we need to activate + * the MSI entries before the PCI layer enables MSI in the + * card. Otherwise the card latches a random msi message. + */ + if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { + struct irq_data *irq_data; + + irq_data = irq_domain_get_irq_data(domain, desc->irq); + irq_domain_activate_irq(irq_data); + } } return 0; -- cgit v0.10.2 From 46c8f0b077a838eb1f6169bb370aab8ed98f7630 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Mon, 8 Aug 2016 16:29:07 -0400 Subject: timers: Fix get_next_timer_interrupt() computation The tick_nohz_stop_sched_tick() routine is not properly canceling the sched timer when nothing is pending, because get_next_timer_interrupt() is no longer returning KTIME_MAX in that case. This causes periodic interrupts when none are needed. When determining the next interrupt time, we first use __next_timer_interrupt() to get the first expiring timer in the timer wheel. If no timer is found, we return the base clock value plus NEXT_TIMER_MAX_DELTA to indicate there is no timer in the timer wheel. Back in get_next_timer_interrupt(), we set the "expires" value by converting the timer wheel expiry (in ticks) to a nsec value. But we don't want to do this if the timer wheel expiry value indicates no timer; we want to return KTIME_MAX. Prior to commit 500462a9de65 ("timers: Switch to a non-cascading wheel") we checked base->active_timers to see if any timers were active, and if not, we didn't touch the expiry value and so properly returned KTIME_MAX. Now we don't have active_timers. To fix this, we now just check the timer wheel expiry value to see if it is "now + NEXT_TIMER_MAX_DELTA", and if it is, we don't try to compute a new value based on it, but instead simply let the KTIME_MAX value in expires remain. 
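The essence of the change, sketched from the diff below (not a standalone snippet): only convert the timer-wheel expiry to nanoseconds when a real timer was found, otherwise leave the initial KTIME_MAX value untouched.

    nextevt = __next_timer_interrupt(base);
    is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
    ...
    if (!is_max_delta)
        expires = basem + (nextevt - basej) * TICK_NSEC;
    /* else: expires stays at KTIME_MAX, so NOHZ can keep the tick stopped */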
Fixes: 500462a9de65 "timers: Switch to a non-cascading wheel" Signed-off-by: Chris Metcalf Cc: Frederic Weisbecker Cc: Christoph Lameter Cc: John Stultz Link: http://lkml.kernel.org/r/1470688147-22287-1-git-send-email-cmetcalf@mellanox.com Signed-off-by: Thomas Gleixner diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 555670a..32bf6f7 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1496,6 +1496,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); u64 expires = KTIME_MAX; unsigned long nextevt; + bool is_max_delta; /* * Pretend that there is no timer pending if the cpu is offline. @@ -1506,6 +1507,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) spin_lock(&base->lock); nextevt = __next_timer_interrupt(base); + is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); base->next_expiry = nextevt; /* * We have a fresh next event. Check whether we can forward the base: @@ -1519,7 +1521,8 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) expires = basem; base->is_idle = false; } else { - expires = basem + (nextevt - basej) * TICK_NSEC; + if (!is_max_delta) + expires = basem + (nextevt - basej) * TICK_NSEC; /* * If we expect to sleep more than a tick, mark the base idle: */ -- cgit v0.10.2 From b173a28f62cf929324a8a6adcc45adadce311d16 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Mon, 8 Aug 2016 21:57:58 +0800 Subject: netfilter: nf_ct_expect: remove the redundant slash when policy name is empty The 'name' filed in struct nf_conntrack_expect_policy{} is not a pointer, so check it is NULL or not will always return true. Even if the name is empty, slash will always be displayed like follows: # cat /proc/net/nf_conntrack_expect 297 l3proto = 2 proto=6 src=1.1.1.1 dst=2.2.2.2 sport=1 dport=1025 ftp/ ^ Fixes: 3a8fc53a45c4 ("netfilter: nf_ct_helper: allocate 16 bytes for the helper and policy names") Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 9e36931..f8dbacf 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v) helper = rcu_dereference(nfct_help(expect->master)->helper); if (helper) { seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); - if (helper->expect_policy[expect->class].name) + if (helper->expect_policy[expect->class].name[0]) seq_printf(s, "/%s", helper->expect_policy[expect->class].name); } -- cgit v0.10.2 From b18bcb0019cf57a3db0917b77e65c29bd9b00f54 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Mon, 8 Aug 2016 22:03:40 +0800 Subject: netfilter: nfnetlink_queue: fix memory leak when attach expectation successfully User can use NFQA_EXP to attach expectations to conntracks, but we forget to put back nf_conntrack_expect when it is inserted successfully, i.e. in this normal case, expect's use refcnt will be 3. So even we unlink it and put it back later, the use refcnt is still 1, then the memory will be leaked forever. 
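The essence of the fix, sketched from the diff below: release the reference unconditionally once nf_ct_expect_related_report() has been called, regardless of whether the insert succeeded.

    err = nf_ct_expect_related_report(exp, portid, report);
    nf_ct_expect_put(exp);    /* always drop our reference */
    return err;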
Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 050bb34..b9bfe64 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2362,12 +2362,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, return PTR_ERR(exp); err = nf_ct_expect_related_report(exp, portid, report); - if (err < 0) { - nf_ct_expect_put(exp); - return err; - } - - return 0; + nf_ct_expect_put(exp); + return err; } static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, -- cgit v0.10.2 From 00a3101f561816e58de054a470484996f78eb5eb Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Mon, 8 Aug 2016 22:07:27 +0800 Subject: netfilter: nfnetlink_queue: reject verdict request from different portid Like NFQNL_MSG_VERDICT_BATCH do, we should also reject the verdict request when the portid is not same with the initial portid(maybe from another process). Fixes: 97d32cf9440d ("netfilter: nfnetlink_queue: batch verdict support") Signed-off-by: Liping Zhang Reviewed-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 5d36a09..f49f450 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl, struct nfnl_queue_net *q = nfnl_queue_pernet(net); int err; - queue = instance_lookup(q, queue_num); - if (!queue) - queue = verdict_instance_lookup(q, queue_num, - NETLINK_CB(skb).portid); + queue = verdict_instance_lookup(q, queue_num, + NETLINK_CB(skb).portid); if (IS_ERR(queue)) return PTR_ERR(queue); -- cgit v0.10.2 From aa0c2c68abd1e2915656cc81afdb195bc8595dec Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Mon, 8 Aug 2016 22:10:26 +0800 Subject: netfilter: ctnetlink: reject new conntrack request with different l4proto Currently, user can add a conntrack with different l4proto via nfnetlink. For example, original tuple is TCP while reply tuple is SCTP. This is invalid combination, we should report EINVAL to userspace. Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index b9bfe64..fdfc71f 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl, if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) return -EINVAL; + if (otuple.dst.protonum != rtuple.dst.protonum) + return -EINVAL; ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple, &rtuple, u3); -- cgit v0.10.2 From c74dd88e77d3ecbc9e55c78796d82c9aa21cabad Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Tue, 9 Aug 2016 10:39:13 +0530 Subject: powerpc/book3s: Fix MCE console messages for unrecoverable MCE. When machine check occurs with MSR(RI=0), it means MC interrupt is unrecoverable and kernel goes down to panic path. But the console message still shows it as recovered. This patch fixes the MCE console messages. 
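Sketched from the diff below, the disposition is now only marked as recovered when the event was both handled and taken with MSR[RI] set:

    if (handled && (regs->msr & MSR_RI))
        mce->disposition = MCE_DISPOSITION_RECOVERED;
    else
        mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;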
Fixes: 36df96f8acaf ("powerpc/book3s: Decode and save machine check event.") Signed-off-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index ef267fd..5e7ece0 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled, mce->in_use = 1; mce->initiator = MCE_INITIATOR_CPU; - if (handled) + /* Mark it recovered if we have handled it and MSR(RI=1). */ + if (handled && (regs->msr & MSR_RI)) mce->disposition = MCE_DISPOSITION_RECOVERED; else mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 8b4fc68..6c9a65b 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs, if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ + pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); recovered = 0; } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ -- cgit v0.10.2 From 55cae7a403f3b52de3dd6e4a614582541c9631af Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 8 Aug 2016 12:13:45 +0200 Subject: rxrpc: fix uninitialized pointer dereference in debug code A newly added bugfix caused an uninitialized variable to be used for printing debug output. This is harmless as long as the debug setting is disabled, but otherwise leads to an immediate crash. gcc warns about this when -Wmaybe-uninitialized is enabled: net/rxrpc/call_object.c: In function 'rxrpc_release_call': net/rxrpc/call_object.c:496:163: error: 'sp' may be used uninitialized in this function [-Werror=maybe-uninitialized] The initialization was removed but one of the users remains. This adds back the initialization. Signed-off-by: Arnd Bergmann Fixes: 372ee16386bb ("rxrpc: Fix races between skb free, ACK generation and replying") Signed-off-by: David Howells diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index c47f14f..e8c953c 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -493,6 +493,7 @@ void rxrpc_release_call(struct rxrpc_call *call) (skb = skb_dequeue(&call->rx_oos_queue))) { spin_unlock_bh(&call->lock); + sp = rxrpc_skb(skb); _debug("- zap %s %%%u #%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial, sp->hdr.seq); -- cgit v0.10.2 From 5958d19a143eb229e9ece20bd4c781ad41cb7d24 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 8 Jul 2016 15:55:43 +1000 Subject: powerpc/pnv/pci: Fix incorrect PE reservation attempt on some 64-bit BARs The generic allocation code may sometimes decide to assign a prefetchable 64-bit BAR to the M32 window. In fact it may also decide to allocate a 64-bit non-prefetchable BAR to the M64 one ! So using the resource flags as a test to decide which window was used for PE allocation is just wrong and leads to insane PE numbers. Instead, compare the addresses to figure it out. 
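The replacement test, sketched from the diff below, classifies a resource by its start address against the M64 window instead of trusting the resource flags:

    static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
    {
        /* Flags can lie: a 64-bit prefetchable BAR may end up in the M32 window. */
        return r->start >= phb->ioda.m64_base &&
               r->start < (phb->ioda.m64_base + phb->ioda.m64_size);
    }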
Signed-off-by: Benjamin Herrenschmidt [mpe: Rename the function as agreed by Ben & Gavin] Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index dedbbe9..fd9444f 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -111,10 +111,17 @@ static int __init iommu_setup(char *str) } early_param("iommu", iommu_setup); -static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) +static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r) { - return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) == - (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)); + /* + * WARNING: We cannot rely on the resource flags. The Linux PCI + * allocation code sometimes decides to put a 64-bit prefetchable + * BAR in the 32-bit window, so we have to compare the addresses. + * + * For simplicity we only test resource start. + */ + return (r->start >= phb->ioda.m64_base && + r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); } static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) @@ -229,7 +236,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, sgsz = phb->ioda.m64_segsize; for (i = 0; i <= PCI_ROM_RESOURCE; i++) { r = &pdev->resource[i]; - if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags)) + if (!r->parent || !pnv_pci_is_m64(phb, r)) continue; start = _ALIGN_DOWN(r->start - base, sgsz); @@ -2863,7 +2870,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) res = &pdev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || res->parent) continue; - if (!pnv_pci_is_mem_pref_64(res->flags)) { + if (!pnv_pci_is_m64(phb, res)) { dev_warn(&pdev->dev, "Don't support SR-IOV with" " non M64 VF BAR%d: %pR. \n", i, res); @@ -2958,7 +2965,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, index++; } } else if ((res->flags & IORESOURCE_MEM) && - !pnv_pci_is_mem_pref_64(res->flags)) { + !pnv_pci_is_m64(phb, res)) { region.start = res->start - phb->hose->mem_offset[0] - phb->ioda.m32_pci_base; @@ -3083,9 +3090,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, bridge = bridge->bus->self; } - /* We fail back to M32 if M64 isn't supported */ - if (phb->ioda.m64_segsize && - pnv_pci_is_mem_pref_64(type)) + /* + * We fall back to M32 if M64 isn't supported. We enforce the M64 + * alignment for any 64-bit resource, PCIe doesn't care and + * bridges only do 64-bit prefetchable anyway. + */ + if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64)) return phb->ioda.m64_segsize; if (type & IORESOURCE_MEM) return phb->ioda.m32_segsize; @@ -3125,7 +3135,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, w = NULL; if (r->flags & type & IORESOURCE_IO) w = &hose->io_resource; - else if (pnv_pci_is_mem_pref_64(r->flags) && + else if (pnv_pci_is_m64(phb, r) && (type & IORESOURCE_PREFETCH) && phb->ioda.m64_segsize) w = &hose->mem_resources[1]; -- cgit v0.10.2 From 50ee91bdef41c15b671dcd9446ee007a1d2f5ab7 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 9 Aug 2016 10:30:49 +0800 Subject: arm64: Support hard limit of cpu count by nr_cpus Enable the hard limit of cpu count by set boot options nr_cpus=x on arm64, and make a minor change about message when total number of cpu exceeds the limit. 
Reviewed-by: Suzuki K Poulose Reported-by: Shiyuan Hu Signed-off-by: Kefeng Wang Signed-off-by: Will Deacon diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 76a6d92..d93d433 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -661,9 +661,9 @@ void __init smp_init_cpus(void) acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, acpi_parse_gic_cpu_interface, 0); - if (cpu_count > NR_CPUS) - pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n", - cpu_count, NR_CPUS); + if (cpu_count > nr_cpu_ids) + pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n", + cpu_count, nr_cpu_ids); if (!bootcpu_valid) { pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); @@ -677,7 +677,7 @@ void __init smp_init_cpus(void) * with entries in cpu_logical_map while initializing the cpus. * If the cpu set-up fails, invalidate the cpu_logical_map entry. */ - for (i = 1; i < NR_CPUS; i++) { + for (i = 1; i < nr_cpu_ids; i++) { if (cpu_logical_map(i) != INVALID_HWID) { if (smp_cpu_setup(i)) cpu_logical_map(i) = INVALID_HWID; -- cgit v0.10.2 From c12abf346456416ca7f7ba45f363cf92d2480a99 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 9 Aug 2016 08:46:15 +1000 Subject: crypto: powerpc - CRYPT_CRC32C_VPMSUM should depend on ALTIVEC The optimised crc32c implementation depends on VMX (aka. Altivec) instructions, so the kernel must be built with Altivec support in order for the crc32c code to build. Fixes: 6dd7a82cc54e ("crypto: powerpc - Add POWER8 optimised crc32c") Acked-by: Anton Blanchard Signed-off-by: Michael Ellerman Signed-off-by: Herbert Xu diff --git a/crypto/Kconfig b/crypto/Kconfig index a9377be..84d7148 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -439,7 +439,7 @@ config CRYPTO_CRC32C_INTEL config CRYPT_CRC32C_VPMSUM tristate "CRC32c CRC algorithm (powerpc64)" - depends on PPC64 + depends on PPC64 && ALTIVEC select CRYPTO_HASH select CRC32 help -- cgit v0.10.2 From a0118c8b2be9297aed8e915c60b4013326b256d4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 9 Aug 2016 08:27:17 +0100 Subject: crypto: caam - fix non-hmac hashes Since 6de62f15b581 ("crypto: algif_hash - Require setkey before accept(2)"), the AF_ALG interface requires userspace to provide a key to any algorithm that has a setkey method. However, the non-HMAC algorithms are not keyed, so setting a key is unnecessary. Fix this by removing the setkey method from the non-keyed hash algorithms. Fixes: 6de62f15b581 ("crypto: algif_hash - Require setkey before accept(2)") Cc: Signed-off-by: Russell King Signed-off-by: Herbert Xu diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index f1ecc8d..36365b3 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template, template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); + t_alg->ahash_alg.setkey = NULL; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; -- cgit v0.10.2 From 58625edf9e2515ed41dac2a24fa8004030a87b87 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 2 Aug 2016 14:16:31 +0000 Subject: virtio: fix memory leak in virtqueue_add() When using the indirect buffers feature, 'desc' is allocated in virtqueue_add() but isn't freed before leaving on a ring full error, causing a memory leak. For example, it seems rather clear that this can trigger with virtio net if mergeable buffers are not used. 
Cc: stable@vger.kernel.org Signed-off-by: Wei Yongjun Signed-off-by: Michael S. Tsirkin diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 114a0c8..5ed228d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -327,6 +327,8 @@ static inline int virtqueue_add(struct virtqueue *_vq, * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); + if (indirect) + kfree(desc); END_USE(vq); return -ENOSPC; } -- cgit v0.10.2 From 3cc36f6e34bd2d92d23be7b598ba5e639c47b01a Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 3 Aug 2016 07:18:51 +0300 Subject: virtio: fix error handling for debug builds On error, virtqueue_add calls START_USE but not END_USE. Thankfully that's normally empty anyway, but might not be when debugging. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5ed228d..e383ecd 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -428,6 +428,7 @@ unmap_release: if (indirect) kfree(desc); + END_USE(vq); return -EIO; } -- cgit v0.10.2 From 1b8553c04bf95180eb91be94f089a1e8b38cfd62 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Wed, 3 Aug 2016 19:59:47 +0300 Subject: 9p/trans_virtio: use kvfree() for iov_iter_get_pages_alloc() The memory allocated by iov_iter_get_pages_alloc() can be allocated with vmalloc() if kmalloc() failed -- see get_pages_array(). In that case we need to free it with vfree(), so let's use kvfree(). The bug manifests like this: BUG: unable to handle kernel paging request at ffffeb0400072da0 IP: [] kfree+0x4b/0x140 PGD 0 Oops: 0000 [#1] PREEMPT SMP KASAN CPU: 2 PID: 675 Comm: trinity-c2 Not tainted 4.7.0-rc7+ #14 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 task: ffff8800badef2c0 ti: ffff880069208000 task.ti: ffff880069208000 RIP: 0010:[] [] kfree+0x4b/0x140 RSP: 0000:ffff88006920f3f0 EFLAGS: 00010282 RAX: ffffea0000000000 RBX: ffffc90001cb6000 RCX: 0000000000000000 RDX: 0000000000000001 RSI: 0000000000000246 RDI: ffffc90001cb6000 RBP: ffff88006920f410 R08: 0000000000000000 R09: dffffc0000000000 R10: ffff8800badefa30 R11: 0000056a3d3b0d9f R12: ffff88006920f620 R13: ffffeb0400072d80 R14: ffff8800baa94078 R15: 0000000000000000 FS: 00007fbd2b437700(0000) GS:ffff88011af00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffeb0400072da0 CR3: 000000006926d000 CR4: 00000000000006e0 Stack: 0000000000000001 ffff88006920f620 ffffed001755280f ffff8800baa94078 ffff88006920f6a8 ffffffff8310442b dffffc0000000000 ffff8800badefa30 ffff8800badefa28 ffff88011af1fba0 1ffff1000d241e98 ffff8800ba892150 Call Trace: [] p9_virtio_zc_request+0x72b/0xdb0 [] p9_client_zc_rpc.constprop.8+0x246/0xb10 [] p9_client_read+0x4c9/0x750 [] v9fs_fid_readpage+0x14c/0x320 [] v9fs_vfs_readpage+0x36/0x50 [] filemap_fault+0x9a3/0xe60 [] __do_fault+0x158/0x300 [] handle_mm_fault+0x1cf1/0x3c80 [] __do_page_fault+0x30a/0x8e0 [] do_page_fault+0x2f/0x80 [] do_async_page_fault+0x27/0xa0 [] async_page_fault+0x28/0x30 Code: 00 80 41 54 53 49 01 fd 48 0f 42 05 b0 39 67 02 48 89 fb 49 01 c5 48 b8 00 00 00 00 00 ea ff ff 49 c1 ed 0c 49 c1 e5 06 49 01 c5 <49> 8b 45 20 48 8d 50 ff a8 01 4c 0f 45 ea 49 8b 55 20 48 8d 42 RIP [] kfree+0x4b/0x140 RSP CR2: ffffeb0400072da0 ---[ end trace f3d59a04bafec038 ]--- Cc: Al Viro Signed-off-by: Vegard Nossum Signed-off-by: Michael S. 
Tsirkin diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 4acb1d5..f24b25c 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -507,8 +507,8 @@ err_out: /* wakeup anybody waiting for slots to pin pages */ wake_up(&vp_wq); } - kfree(in_pages); - kfree(out_pages); + kvfree(in_pages); + kvfree(out_pages); return err; } -- cgit v0.10.2 From 3fda5d6e580193fa005014355b3a61498f1b3ae0 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 4 Aug 2016 14:52:53 +0100 Subject: vhost/vsock: fix vhost virtio_vsock_pkt use-after-free Stash the packet length in a local variable before handing over ownership of the packet to virtio_transport_recv_pkt() or virtio_transport_free_pkt(). This patch solves the use-after-free since pkt is no longer guaranteed to be alive. Reported-by: Dan Carpenter Signed-off-by: Stefan Hajnoczi Signed-off-by: Michael S. Tsirkin diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 0ddf3a2..e3b30ea 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -307,6 +307,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) vhost_disable_notify(&vsock->dev, vq); for (;;) { + u32 len; + if (!vhost_vsock_more_replies(vsock)) { /* Stop tx until the device processes already * pending replies. Leave tx virtqueue @@ -334,13 +336,15 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) continue; } + len = pkt->len; + /* Only accept correctly addressed packets */ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) virtio_transport_recv_pkt(pkt); else virtio_transport_free_pkt(pkt); - vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); + vhost_add_used(vq, head, sizeof(pkt->hdr) + len); added = true; } -- cgit v0.10.2 From 28ad55578b8a76390d966b09da8c7fa3644f5140 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Fri, 5 Aug 2016 13:52:09 +0100 Subject: virtio-vsock: fix include guard typo Signed-off-by: Stefan Hajnoczi Signed-off-by: Michael S. Tsirkin diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h index 6b011c1..1d57ed3 100644 --- a/include/uapi/linux/virtio_vsock.h +++ b/include/uapi/linux/virtio_vsock.h @@ -32,7 +32,7 @@ */ #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H -#define _UAPI_LINUX_VIRTIO_VOSCK_H +#define _UAPI_LINUX_VIRTIO_VSOCK_H #include #include -- cgit v0.10.2 From 347a529398e8e723338cca5d8a8ae2d9e7e93448 Mon Sep 17 00:00:00 2001 From: Minfei Huang Date: Tue, 9 Aug 2016 16:39:20 +0800 Subject: virtio_blk: Fix a slient kernel panic We do a lot of memory allocation in function init_vq, and don't handle the allocation failure properly. Then this function will return 0, although initialization fails due to lacking memory. At that moment, kernel will panic in guest machine, if virtio is used to drive disk. To fix this bug, we should take care of allocation failure, and return correct value to let caller know what happen. Tested-by: Chao Fan Signed-off-by: Minfei Huang Signed-off-by: Minfei Huang Reviewed-by: Cornelia Huck Reviewed-by: Stefan Hajnoczi Signed-off-by: Michael S. 
Tsirkin diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 1523e05..93b1aaa 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk) num_vqs = 1; vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); - if (!vblk->vqs) { - err = -ENOMEM; - goto out; - } + if (!vblk->vqs) + return -ENOMEM; names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); - if (!names) - goto err_names; - callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); - if (!callbacks) - goto err_callbacks; - vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); - if (!vqs) - goto err_vqs; + if (!names || !callbacks || !vqs) { + err = -ENOMEM; + goto out; + } for (i = 0; i < num_vqs; i++) { callbacks[i] = virtblk_done; @@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk) /* Discover virtqueues and write information to configuration. */ err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); if (err) - goto err_find_vqs; + goto out; for (i = 0; i < num_vqs; i++) { spin_lock_init(&vblk->vqs[i].lock); @@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk) } vblk->num_vqs = num_vqs; - err_find_vqs: +out: kfree(vqs); - err_vqs: kfree(callbacks); - err_callbacks: kfree(names); - err_names: if (err) kfree(vblk->vqs); - out: return err; } -- cgit v0.10.2 From 2ab0d56aadbcd120b8fa524b4a1142e8b06e13c8 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 7 Jul 2016 17:07:56 +0200 Subject: virtio/s390: keep early_put_chars In case the registration of the hvc tty never happens AND the kernel thinks that hvc0 is the preferred console we should keep the early printk function to avoid a kernel panic due to code being removed. Signed-off-by: Christian Borntraeger Signed-off-by: Jing Liu Signed-off-by: Cornelia Huck Signed-off-by: Michael S. Tsirkin diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 1d060fd..b0a849f 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -482,7 +482,7 @@ static int __init kvm_devices_init(void) } /* code for early console output with virtio_console */ -static __init int early_put_chars(u32 vtermno, const char *buf, int count) +static int early_put_chars(u32 vtermno, const char *buf, int count) { char scratch[17]; unsigned int len = count; -- cgit v0.10.2 From 3b2fbb3f06efe5bd2dfdce2a1db703e23c1a78af Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 7 Jul 2016 17:07:57 +0200 Subject: virtio/s390: deprecate old transport There only ever have been two host implementations of the old s390-virtio (pre-ccw) transport: the experimental kuli userspace, and qemu. As qemu switched its default to ccw with 2.4 (with most users having used ccw well before that) and removed the old transport entirely in 2.6, s390-virtio probably hasn't been in active use for quite some time and is therefore likely to bitrot. Let's start the slow march towards removing the code by deprecating it. Note that this also deprecates the early virtio console code, which has been causing trouble in the guest without being wired up in any relevant hypervisor code. Acked-by: Christian Borntraeger Reviewed-by: Dong Jia Shi Reviewed-by: Sascha Silbe Signed-off-by: Cornelia Huck Signed-off-by: Michael S. 
Tsirkin diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9e607bf..5a2907e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -871,4 +871,17 @@ config S390_GUEST Select this option if you want to run the kernel as a guest under the KVM hypervisor. +config S390_GUEST_OLD_TRANSPORT + def_bool y + prompt "Guest support for old s390 virtio transport (DEPRECATED)" + depends on S390_GUEST + help + Enable this option to add support for the old s390-virtio + transport (i.e. virtio devices NOT based on virtio-ccw). This + type of virtio devices is only available on the experimental + kuli userspace or with old (< 2.6) qemu. If you are running + with a modern version of qemu (which supports virtio-ccw since + 1.4 and uses it by default since version 2.4), you probably won't + need this. + endmenu diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile index 241891a..df40692 100644 --- a/drivers/s390/virtio/Makefile +++ b/drivers/s390/virtio/Makefile @@ -6,4 +6,8 @@ # it under the terms of the GNU General Public License (version 2 only) # as published by the Free Software Foundation. -obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o +s390-virtio-objs := virtio_ccw.o +ifdef CONFIG_S390_GUEST_OLD_TRANSPORT +s390-virtio-objs += kvm_virtio.o +endif +obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs) diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index b0a849f..5e5c11f 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -458,6 +458,8 @@ static int __init kvm_devices_init(void) if (test_devices_support(total_memory_size) < 0) return -ENODEV; + pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n"); + rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); if (rc) return rc; -- cgit v0.10.2 From 97b1d23f7bcbee00a5b85bf2c022c612ea67429d Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Aug 2016 12:08:36 +0100 Subject: metag: Drop show_mem() from mem_init() The recent commit 599d0c954f91 ("mm, vmscan: move LRU lists to node"), changed memory management code so that show_mem() is no longer safe to call prior to setup_per_cpu_pageset(), as pgdat->per_cpu_nodestats will still be NULL. This causes an oops on metag due to the call to show_mem() from mem_init(): node_page_state_snapshot(...) + 0x48 pgdat_reclaimable(struct pglist_data * pgdat = 0x402517a0) show_free_areas(unsigned int filter = 0) + 0x2cc show_mem(unsigned int filter = 0) + 0x18 mem_init() mm_init() start_kernel() + 0x204 This wasn't a problem before with zone_reclaimable() as zone_pcp_init() was already setting zone->pageset to &boot_pageset, via setup_arch() and paging_init(), which happens before mm_init(): zone_pcp_init(...) free_area_init_core(...) + 0x138 free_area_init_node(int nid = 0, ...) + 0x1a0 free_area_init_nodes(...) + 0x440 paging_init(unsigned long mem_end = 0x4fe00000) + 0x378 setup_arch(char ** cmdline_p = 0x4024e038) + 0x2b8 start_kernel() + 0x54 No other arches appear to call show_mem() during boot, and it doesn't really add much value to the log, so lets just drop it from mem_init(). 
Signed-off-by: James Hogan Acked-by: Mel Gorman Cc: linux-metag@vger.kernel.org diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index 11fa51c..c0ec116 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -390,7 +390,6 @@ void __init mem_init(void) free_all_bootmem(); mem_init_print_info(NULL); - show_mem(0); } void free_initmem(void) -- cgit v0.10.2 From 6bb47e8ab98accb1319bd43c64966340ba3bba9a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Aug 2016 13:32:22 -0700 Subject: usb: hub: Fix unbalanced reference count/memory leak/deadlocks Memory leak and unbalanced reference count: If the hub gets disconnected while the core is still activating it, this can result in leaking memory of few USB structures. This will happen if we have done a kref_get() from hub_activate() and scheduled a delayed work item for HUB_INIT2/3. Now if hub_disconnect() gets called before the delayed work expires, then we will cancel the work from hub_quiesce(), but wouldn't do a kref_put(). And so the unbalance. kmemleak reports this as (with the commit e50293ef9775 backported to 3.10 kernel with other changes, though the same is true for mainline as well): unreferenced object 0xffffffc08af5b800 (size 1024): comm "khubd", pid 73, jiffies 4295051211 (age 6482.350s) hex dump (first 32 bytes): 30 68 f3 8c c0 ff ff ff 00 a0 b2 2e c0 ff ff ff 0h.............. 01 00 00 00 00 00 00 00 00 94 7d 40 c0 ff ff ff ..........}@.... backtrace: [] create_object+0x148/0x2a0 [] kmemleak_alloc+0x80/0xbc [] kmem_cache_alloc_trace+0x120/0x1ac [] hub_probe+0x120/0xb84 [] usb_probe_interface+0x1ec/0x298 [] driver_probe_device+0x160/0x374 [] __device_attach+0x28/0x4c [] bus_for_each_drv+0x78/0xac [] device_attach+0x6c/0x9c [] bus_probe_device+0x28/0xa0 [] device_add+0x324/0x604 [] usb_set_configuration+0x660/0x6cc [] generic_probe+0x44/0x84 [] usb_probe_device+0x54/0x74 [] driver_probe_device+0x160/0x374 [] __device_attach+0x28/0x4c Deadlocks: If the hub gets disconnected early enough (i.e. before INIT2/INIT3 are finished and the init_work is still queued), the core may call hub_quiesce() after acquiring interface device locks and it will wait for the work to be cancelled synchronously. But if the work handler is already running in parallel, it may try to acquire the same interface device lock and this may result in deadlock. Fix both the issues by removing the call to cancel_delayed_work_sync(). CC: #4.4+ Fixes: e50293ef9775 ("USB: fix invalid memory access in hub_activate()") Reported-by: Manu Gautam Signed-off-by: Viresh Kumar Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bee1351..3ccffac 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1315,8 +1315,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) struct usb_device *hdev = hub->hdev; int i; - cancel_delayed_work_sync(&hub->init_work); - /* hub_wq and related activity won't re-trigger */ hub->quiescing = 1; -- cgit v0.10.2 From ca5cbc8b02f9b21cc8cd1ab36668763ec34f9ee8 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 5 Aug 2016 11:49:45 -0400 Subject: USB: hub: fix up early-exit pathway in hub_activate The early-exit pathway in hub_activate, added by commit e50293ef9775 ("USB: fix invalid memory access in hub_activate()") needs improvement. It duplicates code that is already present at the end of the subroutine, and it neglects to undo the effect of a usb_autopm_get_interface_no_resume() call. 
This patch fixes both problems by making the early-exit pathway jump directly to the end of the subroutine. It simplifies the code at the end by merging two conditionals that actually test the same condition although they appear different: If type < HUB_INIT3 then type must be either HUB_INIT2 or HUB_INIT, and it can't be HUB_INIT because in that case the subroutine would have exited earlier. Signed-off-by: Alan Stern CC: #4.4+ Reviewed-by: Viresh Kumar Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3ccffac..bb69d26 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1055,11 +1055,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) device_lock(hub->intfdev); /* Was the hub disconnected while we were waiting? */ - if (hub->disconnected) { - device_unlock(hub->intfdev); - kref_put(&hub->kref, hub_release); - return; - } + if (hub->disconnected) + goto disconnected; if (type == HUB_INIT2) goto init2; goto init3; @@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Scan all ports that need attention */ kick_hub_wq(hub); - /* Allow autosuspend if it was suppressed */ - if (type <= HUB_INIT3) + if (type == HUB_INIT2 || type == HUB_INIT3) { + /* Allow autosuspend if it was suppressed */ + disconnected: usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); - - if (type == HUB_INIT2 || type == HUB_INIT3) device_unlock(hub->intfdev); + } kref_put(&hub->kref, hub_release); } -- cgit v0.10.2 From 07d316a22e119fa301fd7dba7f1e1adfd4f72c05 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 5 Aug 2016 11:51:30 -0400 Subject: USB: hub: change the locking in hub_activate The locking in hub_activate() is not adequate to provide full mutual exclusion with hub_quiesce(). The subroutine locks the hub's usb_interface, but the callers of hub_quiesce() (such as hub_pre_reset() and hub_event()) hold the lock to the hub's usb_device. This patch changes hub_activate() to make it acquire the same lock as those other routines. Signed-off-by: Alan Stern CC: #4.4+ Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bb69d26..1d5fc32 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1052,7 +1052,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Continue a partial initialization */ if (type == HUB_INIT2 || type == HUB_INIT3) { - device_lock(hub->intfdev); + device_lock(&hdev->dev); /* Was the hub disconnected while we were waiting? 
*/ if (hub->disconnected) @@ -1259,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) queue_delayed_work(system_power_efficient_wq, &hub->init_work, msecs_to_jiffies(delay)); - device_unlock(hub->intfdev); + device_unlock(&hdev->dev); return; /* Continues at init3: below */ } else { msleep(delay); @@ -1282,7 +1282,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Allow autosuspend if it was suppressed */ disconnected: usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); - device_unlock(hub->intfdev); + device_unlock(&hdev->dev); } kref_put(&hub->kref, hub_release); -- cgit v0.10.2 From 28324936f3d672bbf83472fece8f36a158a52276 Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Tue, 26 Jul 2016 16:01:30 +0800 Subject: usb: misc: usbtest: usbtest_do_ioctl may return positive integer For case 14 and case 21, their correct return value is the number of bytes transferred, so it is a positive integer. But in usbtest_ioctl, it takes non-zero as false return value for usbtest_do_ioctl, so it will treat the correct test as wrong test, then the time on tests will be the minus value. Signed-off-by: Peter Chen Cc: stable Fixes: 18fc4ebdc705 ("usb: misc: usbtest: Remove timeval usage") Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 6b978f0..5e3464e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2602,7 +2602,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) ktime_get_ts64(&start); retval = usbtest_do_ioctl(intf, param_32); - if (retval) + if (retval < 0) goto free_mutex; ktime_get_ts64(&end); -- cgit v0.10.2 From 9c6256a5e707a9eb8b91962b550050b13aa75334 Mon Sep 17 00:00:00 2001 From: Xiao Han Date: Tue, 14 Jun 2016 16:22:54 +0200 Subject: usb: misc: ftdi-elan: Fix off-by-one memory corruptions This patch fixes fives off-by-one bugs in the ftdi-elan driver code. The bug can be triggered by plugging a USB adapter for CardBus 3G cards (model U132 manufactured by Elan Digital Systems, Ltd), causing a kernel panic. The fix was tested on Ubuntu 14.04.4 with 4.7.0-rc14.2.0-27-generic+ and 4.4.0-22-generic+ kernel. In the ftdi_elan_synchronize function, an off-by-one memory corruption occurs when packet_bytes is equal or bigger than m. After having read m bytes, that is bytes_read is equal to m, " ..\x00" is still copied to the stack variable causing an out bounds write of 4 bytes, which overwrites the stack canary and results in a kernel panic. This off-by-one requires physical access to the machine. It is not exploitable since we have no control on the overwritten data. Similar off-by-one bugs have been observed in 4 other functions: ftdi_elan_stuck_waiting, ftdi_elan_read, ftdi_elan_edset_output and ftdi_elan_flush_input_fifo. 
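A rough sketch of the buffer arithmetic (assuming, as described above, three output characters per input byte plus a four-byte " ..\0" terminator):

    char data[30 * 3 + 4];                  /* 94 bytes: 30 triples + 4-byte terminator */
    int m_old = (sizeof(data) - 1) / 3;     /* 31: permits one triple too many          */
    int m_new = (sizeof(data) - 1) / 3 - 1; /* 30: 30 * 3 + 4 = 94 bytes, fits exactly  */
    /* With m_old, up to 31 * 3 + 4 = 97 bytes can be written, overrunning the array. */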
Reported-by: Alex Palesandro Signed-off-by: Xiao Han Tested-by: Paul Chaignon Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 52c27ca..9b5b3b2 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, { char data[30 *3 + 4]; char *d = data; - int m = (sizeof(data) - 1) / 3; + int m = (sizeof(data) - 1) / 3 - 1; int bytes_read = 0; int retry_on_empty = 10; int retry_on_timeout = 5; @@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) { int i = 0; char data[30 *3 + 4]; char *d = data; - int m = (sizeof(data) - 1) / 3; + int m = (sizeof(data) - 1) / 3 - 1; int l = 0; struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ @@ -1876,7 +1876,7 @@ more:{ if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; - int m = (sizeof(diag) - 1) / 3; + int m = (sizeof(diag) - 1) / 3 - 1; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; diag[0] = 0; @@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; - int m = (sizeof(diag) - 1) / 3; + int m = (sizeof(diag) - 1) / 3 - 1; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; unsigned char c = 0; @@ -2155,7 +2155,7 @@ more:{ if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; - int m = (sizeof(diag) - 1) / 3; + int m = (sizeof(diag) - 1) / 3 - 1; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; diag[0] = 0; -- cgit v0.10.2 From bcdc09af3ef30ef071677544ce23a1c8873a2dda Mon Sep 17 00:00:00 2001 From: Brendan Gregg Date: Wed, 3 Aug 2016 02:47:49 +0000 Subject: perf script: Add 'bpf-output' field to usage message This adds the 'bpf-output' field to the perf script usage message, and docs. Signed-off-by: Brendan Gregg Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/1470192469-11910-4-git-send-email-bgregg@netflix.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 1f6c705..053bbbd 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -116,8 +116,8 @@ OPTIONS --fields:: Comma separated list of fields to print. Options are: comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, - srcline, period, iregs, brstack, brstacksym, flags. - Field list can be prepended with the type, trace, sw or hw, + srcline, period, iregs, brstack, brstacksym, flags, bpf-output, + callindent. Field list can be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 971ff91..9c640a8 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2116,7 +2116,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) "Valid types: hw,sw,trace,raw. 
" "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," "addr,symoff,period,iregs,brstack,brstacksym,flags," - "callindent", parse_output_fields), + "bpf-output,callindent", parse_output_fields), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", -- cgit v0.10.2 From 9a8a5dcf20eee254ce490f12d579ef80ee776eb6 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 9 Aug 2016 15:46:46 +0200 Subject: iommu/mediatek: Mark static functions in headers inline This was an oversight while merging these functions. Fix it. Cc: Honghui Zhang Fixes: 9ca340c98c0d ('iommu/mediatek: move the common struct into header file') Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 9ed0a84..3dab13b 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -55,19 +55,19 @@ struct mtk_iommu_data { bool enable_4GB; }; -static int compare_of(struct device *dev, void *data) +static inline int compare_of(struct device *dev, void *data) { return dev->of_node == data; } -static int mtk_iommu_bind(struct device *dev) +static inline int mtk_iommu_bind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); return component_bind_all(dev, &data->smi_imu); } -static void mtk_iommu_unbind(struct device *dev) +static inline void mtk_iommu_unbind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); -- cgit v0.10.2 From 887fa86d6fd7a45cee2d0f9d5f75026786d61df2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 5 Aug 2016 12:37:21 -0300 Subject: perf hists: Trim libtraceevent trace_seq buffers When we use libtraceevent to format trace event fields into printable strings to use in hist entries it is important to trim it from the default 4 KiB it starts with to what is really used, to reduce the memory footprint, so use realloc(seq.buffer, seq.len + 1) when returning the seq.buffer formatted with the fields contents. Reported-and-Tested-by: Wang Nan Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Link: http://lkml.kernel.org/n/tip-t3hl7uxmilrkigzmc90rlhk2@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 947d21f..3d3cb83 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -588,7 +588,11 @@ static char *get_trace_output(struct hist_entry *he) } else { pevent_event_info(&seq, evsel->tp_format, &rec); } - return seq.buffer; + /* + * Trim the buffer, it starts at 4KB and we're not going to + * add anything more to this buffer. + */ + return realloc(seq.buffer, seq.len + 1); } static int64_t -- cgit v0.10.2 From 8e34189b347d76acf48ce05831176582201b664d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 6 Aug 2016 19:29:48 +0900 Subject: perf probe: Adjust map->reloc offset when finding kernel symbol from map Adjust map->reloc offset for the unmapped address when finding alternative symbol address from map, because KASLR can relocate the kernel symbol address. 
The same adjustment has been done when finding appropriate kernel symbol address from map which was introduced by commit f90acac75713 ("perf probe: Find given address from offline dwarf") Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Masami Hiramatsu Cc: Alexei Starovoitov Cc: Wang Nan Link: http://lkml.kernel.org/r/20160806192948.e366f3fbc4b194de600f8326@kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 953dc1a..d5ccb65 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -385,7 +385,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo, if (uprobes) address = sym->start; else - address = map->unmap_ip(map, sym->start); + address = map->unmap_ip(map, sym->start) - map->reloc; break; } if (!address) { -- cgit v0.10.2 From cb3f3378cd09aa3fe975b4ad5ee0229dc76315bb Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Fri, 5 Aug 2016 15:22:36 +0300 Subject: perf probe: Fix module name matching If module is "module" then dso->short_name is "[module]". Substring comparing is't enough: "raid10" matches to "[raid1]". This patch also checks terminating zero in module name. Signed-off-by: Konstantin Khlebnikov Acked-by: Masami Hiramatsu Link: http://lkml.kernel.org/r/147039975648.715620.12985971832789032159.stgit@buzz Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index d5ccb65..1201f73 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -170,8 +170,10 @@ static struct map *kernel_get_module_map(const char *module) module = "kernel"; for (pos = maps__first(maps); pos; pos = map__next(pos)) { + /* short_name is "[module]" */ if (strncmp(pos->dso->short_name + 1, module, - pos->dso->short_name_len - 2) == 0) { + pos->dso->short_name_len - 2) == 0 && + module[pos->dso->short_name_len - 2] == '\0') { return pos; } } -- cgit v0.10.2 From 3df33eff2ba96be4f1535db4f672013d756dc9b1 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 9 Aug 2016 14:04:29 +0100 Subject: perf stat: Avoid skew when reading events When we don't have a tracee (i.e. we're attaching to a task or CPU), counters can still be running after our workload finishes, and can still be running as we read their values. As we read events one-by-one, there can be arbitrary skew between values of events, even within a group. This means that ratios within an event group are not reliable. This skew can be seen if measuring a group of identical events, e.g: # perf stat -a -C0 -e '{cycles,cycles}' sleep 1 To avoid this, we must stop groups from counting before we read the values of any constituent events. This patch adds and makes use of a new disable_counters() helper, which disables group leaders (and thus each group as a whole). This mirrors the use of enable_counters() for starting event groups in the absence of a tracee. Closing a group leader splits the group, and without a disabled group leader the newly split events will begin counting. Thus to ensure counts are reliable we must defer closing group leaders until all counts have been read. To do so this patch removes the event closing logic from the read_counters() helper, explicitly closes the events using perf_evlist__close(), which also aids legibility. 
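The required ordering, sketched from the diff below: stop whole groups before any member is read, and close only after every count has been collected.

    disable_counters();             /* stop group leaders so member counts stay consistent */
    t1 = rdclock();
    update_stats(&walltime_nsecs_stats, t1 - t0);
    read_counters();                /* read everything while nothing is counting */
    perf_evlist__close(evsel_list); /* closing a leader splits its group, so do it last */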
Signed-off-by: Mark Rutland Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1470747869-3567-1-git-send-email-mark.rutland@arm.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0c16d20..3c7452b 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -331,7 +331,7 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static void read_counters(bool close_counters) +static void read_counters(void) { struct perf_evsel *counter; @@ -341,11 +341,6 @@ static void read_counters(bool close_counters) if (perf_stat_process_counter(&stat_config, counter)) pr_warning("failed to process counter %s\n", counter->name); - - if (close_counters) { - perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), - thread_map__nr(evsel_list->threads)); - } } } @@ -353,7 +348,7 @@ static void process_interval(void) { struct timespec ts, rs; - read_counters(false); + read_counters(); clock_gettime(CLOCK_MONOTONIC, &ts); diff_timespec(&rs, &ts, &ref_time); @@ -380,6 +375,17 @@ static void enable_counters(void) perf_evlist__enable(evsel_list); } +static void disable_counters(void) +{ + /* + * If we don't have tracee (attaching to task or cpu), counters may + * still be running. To get accurate group ratios, we must stop groups + * from counting before reading their constituent counters. + */ + if (!target__none(&target)) + perf_evlist__disable(evsel_list); +} + static volatile int workload_exec_errno; /* @@ -657,11 +663,20 @@ try_again: } } + disable_counters(); + t1 = rdclock(); update_stats(&walltime_nsecs_stats, t1 - t0); - read_counters(true); + /* + * Closing a group leader splits the group, and as we only disable + * group leaders, results in remaining events becoming enabled. To + * avoid arbitrary skew, we must read all counters before closing any + * group leaders. + */ + read_counters(); + perf_evlist__close(evsel_list); return WEXITSTATUS(status); } -- cgit v0.10.2 From c87edb36118664f1fa275107c1138b6f47793240 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 5 Aug 2016 12:41:52 -0400 Subject: tracing: Fix tick_stop tracepoint symbols for user export The symbols used in the tick_stop tracepoint were not being converted properly into integers in the trace_stop format file. Instead we had this: print fmt: "success=%d dependency=%s", REC->success, __print_symbolic(REC->dependency, { 0, "NONE" }, { (1 << TICK_DEP_BIT_POSIX_TIMER), "POSIX_TIMER" }, { (1 << TICK_DEP_BIT_PERF_EVENTS), "PERF_EVENTS" }, { (1 << TICK_DEP_BIT_SCHED), "SCHED" }, { (1 << TICK_DEP_BIT_CLOCK_UNSTABLE), "CLOCK_UNSTABLE" }) User space tools have no idea how to parse "TICK_DEP_BIT_SCHED" or the other symbols used to do the bit shifting. The reason is that the conversion was done with using the TICK_DEP_MASK_* symbols which are just macros that convert to the BIT shift itself (with the exception of NONE, which was converted properly, because it doesn't use bits, and is defined as zero). The TICK_DEP_BIT_* needs to be denoted by TRACE_DEFINE_ENUM() in order to have this properly converted for user space tools to parse this event. 
Cc: stable@vger.kernel.org Cc: Frederic Weisbecker Fixes: e6e6cc22e067 ("nohz: Use enum code for tick stop failure tracing message") Reported-by: Luiz Capitulino Tested-by: Luiz Capitulino Signed-off-by: Steven Rostedt diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 5144013..28c5da6 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire, #ifdef CONFIG_NO_HZ_COMMON #define TICK_DEP_NAMES \ - tick_dep_name(NONE) \ + tick_dep_mask_name(NONE) \ tick_dep_name(POSIX_TIMER) \ tick_dep_name(PERF_EVENTS) \ tick_dep_name(SCHED) \ tick_dep_name_end(CLOCK_UNSTABLE) #undef tick_dep_name +#undef tick_dep_mask_name #undef tick_dep_name_end -#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); -#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); +/* The MASK will convert to their bits and they need to be processed too */ +#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \ + TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); +#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \ + TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); +/* NONE only has a mask defined for it */ +#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); TICK_DEP_NAMES #undef tick_dep_name +#undef tick_dep_mask_name #undef tick_dep_name_end #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, +#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep } #define show_tick_dep_name(val) \ -- cgit v0.10.2 From 19f00b011729417f69e4df53cc3fe5ecc25134a4 Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Tue, 9 Aug 2016 11:40:08 +0900 Subject: perf probe: Support signedness casting The 'perf probe' tool detects a variable's type and use the detected type to add a new probe. Then, kprobes prints its variable in hexadecimal format if the variable is unsigned and prints in decimal if it is signed. We sometimes want to see unsigned variable in decimal format (i.e. sector_t or size_t). In that case, we need to investigate the variable's size manually to specify just signedness. This patch add signedness casting support. By specifying "s" or "u" as a type, perf-probe will investigate variable size as usual and use the specified signedness. E.g. without this: $ perf probe -a 'submit_bio bio->bi_iter.bi_sector' Added new event: probe:submit_bio (on submit_bio with bi_sector=bio->bi_iter.bi_sector) You can now use it in all perf tools, such as: perf record -e probe:submit_bio -aR sleep 1 $ cat trace_pipe|head dbench-9692 [003] d..1 971.096633: submit_bio: (submit_bio+0x0/0x140) bi_sector=0x3a3d00 dbench-9692 [003] d..1 971.096685: submit_bio: (submit_bio+0x0/0x140) bi_sector=0x1a3d80 dbench-9692 [003] d..1 971.096687: submit_bio: (submit_bio+0x0/0x140) bi_sector=0x3a3d80 ... 
// need to investigate the variable size $ perf probe -a 'submit_bio bio->bi_iter.bi_sector:s64' Added new event: probe:submit_bio (on submit_bio with bi_sector=bio->bi_iter.bi_sector:s64) You can now use it in all perf tools, such as: perf record -e probe:submit_bio -aR sleep 1 With this: // just use "s" to cast its signedness $ perf probe -v -a 'submit_bio bio->bi_iter.bi_sector:s' Added new event: probe:submit_bio (on submit_bio with bi_sector=bio->bi_iter.bi_sector:s) You can now use it in all perf tools, such as: perf record -e probe:submit_bio -aR sleep 1 $ cat trace_pipe|head dbench-9689 [001] d..1 1212.391237: submit_bio: (submit_bio+0x0/0x140) bi_sector=128 dbench-9689 [001] d..1 1212.391252: submit_bio: (submit_bio+0x0/0x140) bi_sector=131072 dbench-9697 [006] d..1 1212.398611: submit_bio: (submit_bio+0x0/0x140) bi_sector=30208 This commit also update perf-probe.txt to describe "types". Most parts are based on existing documentation: Documentation/trace/kprobetrace.txt Committer note: Testing using 'perf trace': # perf probe -a 'submit_bio bio->bi_iter.bi_sector' Added new event: probe:submit_bio (on submit_bio with bi_sector=bio->bi_iter.bi_sector) You can now use it in all perf tools, such as: perf record -e probe:submit_bio -aR sleep 1 # trace --no-syscalls --ev probe:submit_bio 0.000 probe:submit_bio:(ffffffffac3aee00) bi_sector=0xc133c0) 3181.861 probe:submit_bio:(ffffffffac3aee00) bi_sector=0x6cffb8) 3181.881 probe:submit_bio:(ffffffffac3aee00) bi_sector=0x6cffc0) 3184.488 probe:submit_bio:(ffffffffac3aee00) bi_sector=0x6cffc8) 4717.927 probe:submit_bio:(ffffffffac3aee00) bi_sector=0x4dc7a88) 4717.970 probe:submit_bio:(ffffffffac3aee00) bi_sector=0x4dc7880) ^C[root@jouet ~]# Now, using this new feature: [root@jouet ~]# perf probe -a 'submit_bio bio->bi_iter.bi_sector:s' Added new event: probe:submit_bio (on submit_bio with bi_sector=bio->bi_iter.bi_sector:s) You can now use it in all perf tools, such as: perf record -e probe:submit_bio -aR sleep 1 [root@jouet ~]# trace --no-syscalls --ev probe:submit_bio 0.000 probe:submit_bio:(ffffffffac3aee00) bi_sector=7145704) 0.017 probe:submit_bio:(ffffffffac3aee00) bi_sector=7145712) 0.019 probe:submit_bio:(ffffffffac3aee00) bi_sector=7145720) 2.567 probe:submit_bio:(ffffffffac3aee00) bi_sector=7145728) 5631.919 probe:submit_bio:(ffffffffac3aee00) bi_sector=0) 5631.941 probe:submit_bio:(ffffffffac3aee00) bi_sector=8) 5631.945 probe:submit_bio:(ffffffffac3aee00) bi_sector=16) 5631.948 probe:submit_bio:(ffffffffac3aee00) bi_sector=24) ^C# With callchains: # trace --no-syscalls --ev probe:submit_bio/max-stack=10/ 0.000 probe:submit_bio:(ffffffffac3aee00) bi_sector=50662544) submit_bio+0xa8200001 ([kernel.kallsyms]) submit_bh+0xa8200013 ([kernel.kallsyms]) jbd2_journal_commit_transaction+0xa8200691 ([kernel.kallsyms]) kjournald2+0xa82000ca ([kernel.kallsyms]) kthread+0xa82000d8 ([kernel.kallsyms]) ret_from_fork+0xa820001f ([kernel.kallsyms]) 0.023 probe:submit_bio:(ffffffffac3aee00) bi_sector=50662552) submit_bio+0xa8200001 ([kernel.kallsyms]) submit_bh+0xa8200013 ([kernel.kallsyms]) jbd2_journal_commit_transaction+0xa8200691 ([kernel.kallsyms]) kjournald2+0xa82000ca ([kernel.kallsyms]) kthread+0xa82000d8 ([kernel.kallsyms]) ret_from_fork+0xa820001f ([kernel.kallsyms]) 0.027 probe:submit_bio:(ffffffffac3aee00) bi_sector=50662560) submit_bio+0xa8200001 ([kernel.kallsyms]) submit_bh+0xa8200013 ([kernel.kallsyms]) jbd2_journal_commit_transaction+0xa8200691 ([kernel.kallsyms]) kjournald2+0xa82000ca ([kernel.kallsyms]) kthread+0xa82000d8 
([kernel.kallsyms]) ret_from_fork+0xa820001f ([kernel.kallsyms]) 2.593 probe:submit_bio:(ffffffffac3aee00) bi_sector=50662568) submit_bio+0xa8200001 ([kernel.kallsyms]) submit_bh+0xa8200013 ([kernel.kallsyms]) journal_submit_commit_record+0xa82001ac ([kernel.kallsyms]) jbd2_journal_commit_transaction+0xa82012e8 ([kernel.kallsyms]) kjournald2+0xa82000ca ([kernel.kallsyms]) kthread+0xa82000d8 ([kernel.kallsyms]) ret_from_fork+0xa820001f ([kernel.kallsyms]) ^C# Signed-off-by: Naohiro Aota Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Hemant Kumar Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/1470710408-23515-1-git-send-email-naohiro.aota@hgst.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 736da44..b303bcd 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -176,10 +176,18 @@ Each probe argument follows below syntax. 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.) '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters. -'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. +'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (see TYPES for detail) On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid. +TYPES +----- +Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. Prefix 's' and 'u' means those types are signed and unsigned respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' to specify only signedness and leave its size auto-detected by perf probe. +String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. +Bitfield is another special type, which takes 3 parameters, bit-width, bit-offset, and container-size (usually 32). The syntax is; + + b@/ + LINE SYNTAX ----------- Line range is described by following syntax. 
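Before moving on to the probe-finder.c change, the TYPES rules above can be made concrete with a small userspace sketch: the same raw value rendered as signed decimal versus unsigned hex, plus a bitfield fetch of the b<bit-width>@<bit-offset>/<container-size> form. The shift arithmetic below assumes the offset is counted from the least significant bit; that is an illustrative assumption, not a statement about the kprobe fetch implementation.

#include <stdio.h>
#include <stdint.h>

/* b<bit-width>@<bit-offset>/<container-size>: pull <width> bits starting
 * <offset> bits above the LSB out of a container value that has already
 * been read from memory (offset-from-LSB assumed for illustration). */
static uint64_t fetch_bitfield(uint64_t container, unsigned int width,
			       unsigned int offset)
{
	uint64_t mask = (width < 64) ? (1ULL << width) - 1 : ~0ULL;

	return (container >> offset) & mask;
}

int main(void)
{
	uint64_t bi_sector = 0x3a3d00;	/* one of the raw values traced above */

	/* Unsigned arguments are printed in hex, signed ones in decimal,
	 * which is why the bare "s"/"u" cast changes what you see. */
	printf("as u64: 0x%llx\n", (unsigned long long)bi_sector);
	printf("as s64: %lld\n", (long long)bi_sector);

	/* Example spec b4@8/32 applied to a 32-bit container value. */
	printf("bitfield b4@8/32 of 0x%x -> %llu\n", 0xabcd1234u,
	       (unsigned long long)fetch_bitfield(0xabcd1234u, 4, 8));
	return 0;
}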
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index f2d9ff0..5c290c6 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -297,10 +297,13 @@ static int convert_variable_type(Dwarf_Die *vr_die, char sbuf[STRERR_BUFSIZE]; int bsize, boffs, total; int ret; + char sign; /* TODO: check all types */ - if (cast && strcmp(cast, "string") != 0) { + if (cast && strcmp(cast, "string") != 0 && + strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) { /* Non string type is OK */ + /* and respect signedness cast */ tvar->type = strdup(cast); return (tvar->type == NULL) ? -ENOMEM : 0; } @@ -361,6 +364,13 @@ static int convert_variable_type(Dwarf_Die *vr_die, return (tvar->type == NULL) ? -ENOMEM : 0; } + if (cast && (strcmp(cast, "u") == 0)) + sign = 'u'; + else if (cast && (strcmp(cast, "s") == 0)) + sign = 's'; + else + sign = die_is_signed_type(&type) ? 's' : 'u'; + ret = dwarf_bytesize(&type); if (ret <= 0) /* No size ... try to use default type */ @@ -373,8 +383,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, dwarf_diename(&type), MAX_BASIC_TYPE_BITS); ret = MAX_BASIC_TYPE_BITS; } - ret = snprintf(buf, 16, "%c%d", - die_is_signed_type(&type) ? 's' : 'u', ret); + ret = snprintf(buf, 16, "%c%d", sign, ret); formatted: if (ret < 0 || ret >= 16) { -- cgit v0.10.2 From aed9d65ac3278d4febd8665bd7db59ef53e825fe Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 1 Aug 2016 15:25:56 -0400 Subject: USB: validate wMaxPacketValue entries in endpoint descriptors Erroneous or malicious endpoint descriptors may have non-zero bits in reserved positions, or out-of-bounds values. This patch helps prevent these from causing problems by bounds-checking the wMaxPacketValue entries in endpoint descriptors and capping the values at the maximum allowed. This issue was first discovered and tests were conducted by Jake Lamberson , an intern working for Rosie Hall. 
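The patch implements the capping with per-speed lookup tables; reduced to a userspace-only sketch with a made-up descriptor value, the idea looks like the following for a high-speed interrupt endpoint, where bits 12:11 of wMaxPacketSize carry the additional-transactions count and bits 10:0 the packet size.

#include <stdio.h>

/* Endpoint transfer types as encoded in bmAttributes & 0x3. */
enum { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

/* High-speed per-type limits, matching the table added by the patch. */
static const unsigned short hs_maxpacket_max[4] = {
	[XFER_CONTROL]	= 64,
	[XFER_ISOC]	= 1024,
	[XFER_BULK]	= 512,
	[XFER_INT]	= 1023,
};

int main(void)
{
	unsigned int wMaxPacketSize = 0x1400;	/* made-up descriptor value */
	unsigned int type = XFER_INT;

	/* HS periodic endpoints: bits 12:11 = extra transactions/uframe. */
	unsigned int extra = wMaxPacketSize & (3u << 11);
	unsigned int maxp  = wMaxPacketSize & 0x7ff;
	unsigned int limit = hs_maxpacket_max[type];

	if (maxp > limit) {
		printf("invalid maxpacket %u, capping to %u\n", maxp, limit);
		maxp = limit;
	}
	printf("sanitized wMaxPacketSize = 0x%x\n", extra | maxp);
	return 0;
}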
Signed-off-by: Alan Stern Reported-by: roswest Tested-by: roswest CC: Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 31ccdcc..0511631 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, ep, buffer, size); } +static const unsigned short low_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 8, + [USB_ENDPOINT_XFER_ISOC] = 0, + [USB_ENDPOINT_XFER_BULK] = 0, + [USB_ENDPOINT_XFER_INT] = 8, +}; +static const unsigned short full_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 64, + [USB_ENDPOINT_XFER_ISOC] = 1023, + [USB_ENDPOINT_XFER_BULK] = 64, + [USB_ENDPOINT_XFER_INT] = 64, +}; +static const unsigned short high_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 64, + [USB_ENDPOINT_XFER_ISOC] = 1024, + [USB_ENDPOINT_XFER_BULK] = 512, + [USB_ENDPOINT_XFER_INT] = 1023, +}; +static const unsigned short super_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 512, + [USB_ENDPOINT_XFER_ISOC] = 1024, + [USB_ENDPOINT_XFER_BULK] = 1024, + [USB_ENDPOINT_XFER_INT] = 1024, +}; + static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) @@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i, j, retval; + unsigned int maxp; + const unsigned short *maxpacket_maxes; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; @@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } + /* Validate the wMaxPacketSize field */ + maxp = usb_endpoint_maxp(&endpoint->desc); + + /* Find the highest legal maxpacket size for this endpoint */ + i = 0; /* additional transactions per microframe */ + switch (to_usb_device(ddev)->speed) { + case USB_SPEED_LOW: + maxpacket_maxes = low_speed_maxpacket_maxes; + break; + case USB_SPEED_FULL: + maxpacket_maxes = full_speed_maxpacket_maxes; + break; + case USB_SPEED_HIGH: + /* Bits 12..11 are allowed only for HS periodic endpoints */ + if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { + i = maxp & (BIT(12) | BIT(11)); + maxp &= ~i; + } + /* fallthrough */ + default: + maxpacket_maxes = high_speed_maxpacket_maxes; + break; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + maxpacket_maxes = super_speed_maxpacket_maxes; + break; + } + j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; + + if (maxp > j) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", + cfgno, inum, asnum, d->bEndpointAddress, maxp, j); + maxp = j; + endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); + } + /* * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. 
High speed HCDs may not @@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, */ if (to_usb_device(ddev)->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { - unsigned maxp; - - maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; if (maxp != 512) dev_warn(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", -- cgit v0.10.2 From bc337b51508beb2d039aff5074a76cfe1c212030 Mon Sep 17 00:00:00 2001 From: Marc Ohlf Date: Wed, 3 Aug 2016 11:51:54 +0200 Subject: usb: ehci: change order of register cleanup during shutdown In ehci_turn_off_all_ports() all EHCI port registers are cleared to zero. On some hardware, this can lead to an system hang, when ehci_port_power() accesses the already cleared registers. This patch changes the order of cleanup. First call ehci_port_power() which respects the current bits in port status registers and afterwards cleanup the hard way by setting everything to zero. Signed-off-by: Marc Ohlf Acked-by: Alan Stern CC: Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a962b89..1e5f529 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) int port = HCS_N_PORTS(ehci->hcs_params); while (port--) { - ehci_writel(ehci, PORT_RWC_BITS, - &ehci->regs->port_status[port]); spin_unlock_irq(&ehci->lock); ehci_port_power(ehci, port, false); spin_lock_irq(&ehci->lock); + ehci_writel(ehci, PORT_RWC_BITS, + &ehci->regs->port_status[port]); } } -- cgit v0.10.2 From 70f7ca9a0262784d0b80727860a63d64ab228e7b Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 15 Jun 2016 15:56:11 +0200 Subject: usb: devio, do not warn when allocation fails usbdev_mmap allocates a buffer. The size of the buffer is determined by a user. So with this code (no need to be root): int fd = open("/dev/bus/usb/001/001", O_RDONLY); mmap(NULL, 0x800000, PROT_READ, MAP_SHARED, fd, 0); we can see a warning: WARNING: CPU: 0 PID: 21771 at ../mm/page_alloc.c:3563 __alloc_pages_slowpath+0x1036/0x16e0() ... Call Trace: [] ? warn_slowpath_null+0x2e/0x40 [] ? __alloc_pages_slowpath+0x1036/0x16e0 [] ? warn_alloc_failed+0x250/0x250 [] ? get_page_from_freelist+0x75b/0x28b0 [] ? __alloc_pages_nodemask+0x583/0x6b0 [] ? __alloc_pages_slowpath+0x16e0/0x16e0 [] ? dma_generic_alloc_coherent+0x104/0x220 [] ? hcd_buffer_alloc+0x1d6/0x3e0 [usbcore] [] ? hcd_buffer_destroy+0xa0/0xa0 [usbcore] [] ? usb_alloc_coherent+0x65/0x90 [usbcore] [] ? usbdev_mmap+0x1a5/0x770 [usbcore] ... Allocations like this one should be marked as __GFP_NOWARN. So do so. The size could be also clipped by something like: if (size >= (1 << (MAX_ORDER + PAGE_SHIFT - 1))) return -ENOMEM; But I think the overall limit of 16M (by usbfs_increase_memory_usage) is enough, so that we only silence the warning here. Signed-off-by: Jiri Slaby Cc: Greg Kroah-Hartman Cc: Alan Stern Cc: Steinar H. Gunderson Cc: Markus Rechberger Fixes: f7d34b445a (USB: Add support for usbfs zerocopy.) 
Cc: 4.6+ Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9f5043..472cbcd 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) goto error_decrease_mem; } - mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle); + mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, + &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; -- cgit v0.10.2 From 5cce438298a0d2a7a857a4a3c3e26aeb8f77b941 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 10 Jun 2016 14:42:55 -0400 Subject: USB: remove race condition in usbfs/libusb when using reap-after-disconnect Hans de Goede has reported a difficulty in the Linux port of libusb. When a device is removed, the poll() system call in usbfs starts returning POLLERR as soon as udev->state is set to USB_STATE_NOTATTACHED, but the outstanding URBs are not available for reaping until some time later (after usbdev_remove() has been called). This is awkward for libusb or other usbfs clients, although not an insuperable problem. At any rate, it's easy to change usbfs so that it returns POLLHUP as soon as the state becomes USB_STATE_NOTATTACHED but it doesn't return POLLERR until after the outstanding URBs have completed. That's what this patch does; it uses the fact that ps->list is always on the dev->filelist list until usbdev_remove() takes it off, which happens after all the outstanding URBs have been cancelled. Signed-off-by: Alan Stern Reported-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 472cbcd..e6a6d67 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -2583,7 +2583,9 @@ static unsigned int usbdev_poll(struct file *file, if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= POLLOUT | POLLWRNORM; if (!connected(ps)) - mask |= POLLERR | POLLHUP; + mask |= POLLHUP; + if (list_empty(&ps->list)) + mask |= POLLERR; return mask; } -- cgit v0.10.2 From 59b71f774fc2ec2d985251e72fde0f9f88164547 Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Thu, 21 Jul 2016 22:20:53 +0900 Subject: usb: host: max3421-hcd: fix mask of IO control register GPIO control register is divided into IOPINS1 and IOPINS2. And low 4-bit of register is controls output. So, this patch fixes wrong mask of GPIO output. Signed-off-by: Jaewon Kim Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index c369c29..2f76900 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) if (pin_number > 7) return; - mask = 1u << pin_number; + mask = 1u << (pin_number % 4); idx = pin_number / 4; if (value) -- cgit v0.10.2 From bebfb730125863aac74607dff24f1bdb9fb02a90 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 9 Aug 2016 11:21:57 -0300 Subject: tools: Sync cpufeatures.h and vmx.h with the kernel There were changes related to the deprecation of the "pcommit" instruction: fd1d961dd681 ("x86/insn: remove pcommit") dfa169bbee00 ("Revert "KVM: x86: add pcommit support"") No need to update anything in the tools, as "pcommit" wasn't being listed on the VMX_EXIT_REASONS in the tools/perf/arch/x86/util/kvm-stat.c file. 
Just grab fresh copies of these files to silence the file cache coherency detector: $ make -C tools/perf O=/tmp/build/perf install-bin make: Entering directory '/home/acme/git/linux/tools/perf' BUILD: Doing 'make -j4' parallel build Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel Warning: tools/arch/x86/include/uapi/asm/vmx.h differs from kernel INSTALL GTK UI # Cc: Adrian Hunter Cc: Dan Williams Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Paolo Bonzini Cc: Ross Zwisler Cc: Wang Nan Cc: Xiao Guangrong Link: http://lkml.kernel.org/n/tip-07pmcc1ysydhyyxbmp1vt0l4@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 4a41348..92a8308 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -225,7 +225,6 @@ #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ @@ -301,10 +300,6 @@ #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ -#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */ -#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */ - - #ifdef CONFIG_X86_32 /* * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional @@ -312,5 +307,7 @@ */ #define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ #endif - +#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index 5b15d94..37fee27 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -78,7 +78,6 @@ #define EXIT_REASON_PML_FULL 62 #define EXIT_REASON_XSAVES 63 #define EXIT_REASON_XRSTORS 64 -#define EXIT_REASON_PCOMMIT 65 #define VMX_EXIT_REASONS \ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ @@ -127,8 +126,7 @@ { EXIT_REASON_INVVPID, "INVVPID" }, \ { EXIT_REASON_INVPCID, "INVPCID" }, \ { EXIT_REASON_XSAVES, "XSAVES" }, \ - { EXIT_REASON_XRSTORS, "XRSTORS" }, \ - { EXIT_REASON_PCOMMIT, "PCOMMIT" } + { EXIT_REASON_XRSTORS, "XRSTORS" } #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 -- cgit v0.10.2 From 791cceb89f7987c0375ff2e8971928a47f62ccae Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 9 Aug 2016 11:48:07 -0300 Subject: toops: Sync tools/include/uapi/linux/bpf.h with the kernel The way we're using kernel headers in tools/ now, with a copy that is made to the same path prefixed by "tools/" plus checking if that copy got stale, i.e. if the kernel counterpart changed, helps in keeping track with new features that may be useful for tools to exploit. 
For instance, looking at all the changes to bpf.h since it was last copied to tools/include brings this to toolers' attention: Need to investigate this one to check how to run a program via perf, setting up a BPF event, that will take advantage of the way perf already calls clang/LLVM, sets up the event and runs the workload in a single command line, helping in debugging such semi cooperative programs: 96ae52279594 ("bpf: Add bpf_probe_write_user BPF helper to be called in tracers") This one needs further investigation about using the feature it improves in 'perf trace' to do some tcpdumpin' mixed with syscalls, tracepoints, probe points, callgraphs, etc: 555c8a8623a3 ("bpf: avoid stack copy and use skb ctx for event output") Add tracing just packets that are related to some container to that mix: 4a482f34afcc ("cgroup: bpf: Add bpf_skb_in_cgroup_proto") 4ed8ec521ed5 ("cgroup: bpf: Add BPF_MAP_TYPE_CGROUP_ARRAY") Definetely needs to have example programs accessing task_struct from a bpf proggie started from 'perf trace': 606274c5abd8 ("bpf: introduce bpf_get_current_task() helper") Core networking related, XDP: 6ce96ca348a9 ("bpf: add XDP_TX xdp_action for direct forwarding") 6a773a15a1e8 ("bpf: add XDP prog type for early driver filter") 13c5c240f789 ("bpf: add bpf_get_hash_recalc helper") d2485c4242a8 ("bpf: add bpf_skb_change_type helper") 6578171a7ff0 ("bpf: add bpf_skb_change_proto helper") Changes detected by the tools build system: $ make -C tools/perf O=/tmp/build/perf install-bin make: Entering directory '/home/acme/git/linux/tools/perf' BUILD: Doing 'make -j4' parallel build Warning: tools/include/uapi/linux/bpf.h differs from kernel INSTALL GTK UI CC /tmp/build/perf/bench/mem-memcpy-x86-64-asm.o $ Cc: Adrian Hunter Cc: Alexei Starovoitov Cc: Brenden Blanco Cc: Daniel Borkmann Cc: David Ahern Cc: David S. Miller Cc: Jiri Olsa Cc: Martin KaFai Lau Cc: Namhyung Kim Cc: Sargun Dhillon Cc: Wang Nan Link: http://lkml.kernel.org/n/tip-difq4ts1xvww6eyfs9e7zlft@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 406459b..da218fe 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -84,6 +84,7 @@ enum bpf_map_type { BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY, BPF_MAP_TYPE_STACK_TRACE, + BPF_MAP_TYPE_CGROUP_ARRAY, }; enum bpf_prog_type { @@ -93,6 +94,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_SCHED_CLS, BPF_PROG_TYPE_SCHED_ACT, BPF_PROG_TYPE_TRACEPOINT, + BPF_PROG_TYPE_XDP, }; #define BPF_PSEUDO_MAP_FD 1 @@ -313,6 +315,66 @@ enum bpf_func_id { */ BPF_FUNC_skb_get_tunnel_opt, BPF_FUNC_skb_set_tunnel_opt, + + /** + * bpf_skb_change_proto(skb, proto, flags) + * Change protocol of the skb. Currently supported is + * v4 -> v6, v6 -> v4 transitions. The helper will also + * resize the skb. eBPF program is expected to fill the + * new headers via skb_store_bytes and lX_csum_replace. + * @skb: pointer to skb + * @proto: new skb->protocol type + * @flags: reserved + * Return: 0 on success or negative error + */ + BPF_FUNC_skb_change_proto, + + /** + * bpf_skb_change_type(skb, type) + * Change packet type of skb. 
+ * @skb: pointer to skb + * @type: new skb->pkt_type type + * Return: 0 on success or negative error + */ + BPF_FUNC_skb_change_type, + + /** + * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb + * @skb: pointer to skb + * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type + * @index: index of the cgroup in the bpf_map + * Return: + * == 0 skb failed the cgroup2 descendant test + * == 1 skb succeeded the cgroup2 descendant test + * < 0 error + */ + BPF_FUNC_skb_in_cgroup, + + /** + * bpf_get_hash_recalc(skb) + * Retrieve and possibly recalculate skb->hash. + * @skb: pointer to skb + * Return: hash + */ + BPF_FUNC_get_hash_recalc, + + /** + * u64 bpf_get_current_task(void) + * Returns current task_struct + * Return: current + */ + BPF_FUNC_get_current_task, + + /** + * bpf_probe_write_user(void *dst, void *src, int len) + * safely attempt to write to a location + * @dst: destination address in userspace + * @src: source address on stack + * @len: number of bytes to copy + * Return: 0 on success or negative error + */ + BPF_FUNC_probe_write_user, + __BPF_FUNC_MAX_ID, }; @@ -347,9 +409,11 @@ enum bpf_func_id { #define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_DONT_FRAGMENT (1ULL << 2) -/* BPF_FUNC_perf_event_output flags. */ +/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */ #define BPF_F_INDEX_MASK 0xffffffffULL #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK +/* BPF_FUNC_perf_event_output for sk_buff input context. */ +#define BPF_F_CTXLEN_MASK (0xfffffULL << 32) /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure @@ -386,4 +450,24 @@ struct bpf_tunnel_key { __u32 tunnel_label; }; +/* User return codes for XDP prog type. + * A valid XDP program must return one of these defined values. All other + * return codes are reserved for future use. Unknown return codes will result + * in packet drop. + */ +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP, + XDP_PASS, + XDP_TX, +}; + +/* user accessible metadata for XDP packet hook + * new fields must be added to the end of this structure + */ +struct xdp_md { + __u32 data; + __u32 data_end; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ -- cgit v0.10.2 From 840b49ba554b40fa8301ad2716abd2fe3d9e382a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 9 Aug 2016 11:56:33 -0300 Subject: tools: Sync cpufeatures headers with the kernel Due to: 1e61f78baf89 ("x86/cpufeature: Make sure DISABLED/REQUIRED macros are updated") No changes to tools using those headers (tools/arch/x86/lib/mem{set,cpu}_64.S) seems necessary. 
Detected by the tools build header drift checker: $ make -C tools/perf O=/tmp/build/perf make: Entering directory '/home/acme/git/linux/tools/perf' BUILD: Doing 'make -j4' parallel build GEN /tmp/build/perf/common-cmds.h Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel Warning: tools/arch/x86/include/asm/required-features.h differs from kernel Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel CC /tmp/build/perf/util/probe-finder.o CC /tmp/build/perf/builtin-help.o ^C$ Cc: Adrian Hunter Cc: Dave Hansen Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: http://lkml.kernel.org/n/tip-ja75m7zk8j0jkzmrv16i5ehw@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 911e935..85599ad 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -56,5 +56,7 @@ #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) +#define DISABLED_MASK17 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 4916144..fac9a5c 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -99,5 +99,7 @@ #define REQUIRED_MASK14 0 #define REQUIRED_MASK15 0 #define REQUIRED_MASK16 0 +#define REQUIRED_MASK17 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ -- cgit v0.10.2 From d820456dc70b231d62171ba46b43db0045e9bd57 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 9 Aug 2016 01:23:24 -0500 Subject: perf probe: Add function to post process kernel trace events Instead of inline code, introduce function to post process kernel probe trace events. Signed-off-by: Ravi Bangoria Acked-by: Masami Hiramatsu Cc: Alexander Shishkin Cc: Ananth N Mavinakayanahalli Cc: Balbir Singh Cc: Namhyung Kim Cc: Naveen N. 
Rao Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/1470723805-5081-1-git-send-email-ravi.bangoria@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 1201f73..234fbfb 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -666,22 +666,14 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, return ret; } -/* Post processing the probe events */ -static int post_process_probe_trace_events(struct probe_trace_event *tevs, - int ntevs, const char *module, - bool uprobe) +static int +post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + int ntevs) { struct ref_reloc_sym *reloc_sym; char *tmp; int i, skipped = 0; - if (uprobe) - return add_exec_to_probe_trace_events(tevs, ntevs, module); - - /* Note that currently ref_reloc_sym based probe is not for drivers */ - if (module) - return add_module_to_probe_trace_events(tevs, ntevs, module); - reloc_sym = kernel_get_ref_reloc_sym(); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); @@ -713,6 +705,21 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs, return skipped; } +/* Post processing the probe events */ +static int post_process_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module, + bool uprobe) +{ + if (uprobe) + return add_exec_to_probe_trace_events(tevs, ntevs, module); + + if (module) + /* Currently ref_reloc_sym based probe is not for drivers */ + return add_module_to_probe_trace_events(tevs, ntevs, module); + + return post_process_kernel_probe_trace_events(tevs, ntevs); +} + /* Try to find perf_probe_event with debuginfo */ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs) -- cgit v0.10.2 From 99e608b5954c9e1ebadbf9660b74697d9dfd9f20 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 9 Aug 2016 01:23:25 -0500 Subject: perf probe ppc64le: Fix probe location when using DWARF Powerpc has Global Entry Point and Local Entry Point for functions. LEP catches call from both the GEP and the LEP. Symbol table of ELF contains GEP and Offset from which we can calculate LEP, but debuginfo does not have LEP info. Currently, perf prioritize symbol table over dwarf to probe on LEP for ppc64le. But when user tries to probe with function parameter, we fall back to using dwarf(i.e. GEP) and when function called via LEP, probe will never hit. For example: $ objdump -d vmlinux ... do_sys_open(): c0000000002eb4a0: e8 00 4c 3c addis r2,r12,232 c0000000002eb4a4: 60 00 42 38 addi r2,r2,96 c0000000002eb4a8: a6 02 08 7c mflr r0 c0000000002eb4ac: d0 ff 41 fb std r26,-48(r1) $ sudo ./perf probe do_sys_open $ sudo cat /sys/kernel/debug/tracing/kprobe_events p:probe/do_sys_open _text+3060904 $ sudo ./perf probe 'do_sys_open filename:string' $ sudo cat /sys/kernel/debug/tracing/kprobe_events p:probe/do_sys_open _text+3060896 filename_string=+0(%gpr4):string For second case, perf probed on GEP. So when function will be called via LEP, probe won't hit. $ sudo ./perf record -a -e probe:do_sys_open ls [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.195 MB perf.data ] To resolve this issue, let's not prioritize symbol table, let perf decide what it wants to use. Perf is already converting GEP to LEP when it uses symbol table. When perf uses debuginfo, let it find LEP offset form symbol table. 
This way we fall back to probe on LEP for all cases. After patch: $ sudo ./perf probe 'do_sys_open filename:string' $ sudo cat /sys/kernel/debug/tracing/kprobe_events p:probe/do_sys_open _text+3060904 filename_string=+0(%gpr4):string $ sudo ./perf record -a -e probe:do_sys_open ls [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.197 MB perf.data (11 samples) ] Signed-off-by: Ravi Bangoria Acked-by: Masami Hiramatsu Cc: Alexander Shishkin Cc: Ananth N Mavinakayanahalli Cc: Balbir Singh Cc: Namhyung Kim Cc: Naveen N. Rao Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/1470723805-5081-2-git-send-email-ravi.bangoria@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c index c6d0f91..8d4dc97 100644 --- a/tools/perf/arch/powerpc/util/sym-handling.c +++ b/tools/perf/arch/powerpc/util/sym-handling.c @@ -54,10 +54,6 @@ int arch__compare_symbol_names(const char *namea, const char *nameb) #endif #if defined(_CALL_ELF) && _CALL_ELF == 2 -bool arch__prefers_symtab(void) -{ - return true; -} #ifdef HAVE_LIBELF_SUPPORT void arch__sym_update(struct symbol *s, GElf_Sym *sym) @@ -100,4 +96,27 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev, tev->point.offset += lep_offset; } } + +void arch__post_process_probe_trace_events(struct perf_probe_event *pev, + int ntevs) +{ + struct probe_trace_event *tev; + struct map *map; + struct symbol *sym = NULL; + struct rb_node *tmp; + int i = 0; + + map = get_target_map(pev->target, pev->uprobes); + if (!map || map__load(map, NULL) < 0) + return; + + for (i = 0; i < ntevs; i++) { + tev = &pev->tevs[i]; + map__for_each_symbol(map, sym, tmp) { + if (map->unmap_ip(map, sym->start) == tev->point.address) + arch__fix_tev_from_maps(pev, tev, map, sym); + } + } +} + #endif diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 234fbfb..2873396 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -180,7 +180,7 @@ static struct map *kernel_get_module_map(const char *module) return NULL; } -static struct map *get_target_map(const char *target, bool user) +struct map *get_target_map(const char *target, bool user) { /* Init maps of given executable or kernel */ if (user) @@ -705,19 +705,32 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, return skipped; } +void __weak +arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused, + int ntevs __maybe_unused) +{ +} + /* Post processing the probe events */ -static int post_process_probe_trace_events(struct probe_trace_event *tevs, +static int post_process_probe_trace_events(struct perf_probe_event *pev, + struct probe_trace_event *tevs, int ntevs, const char *module, bool uprobe) { - if (uprobe) - return add_exec_to_probe_trace_events(tevs, ntevs, module); + int ret; - if (module) + if (uprobe) + ret = add_exec_to_probe_trace_events(tevs, ntevs, module); + else if (module) /* Currently ref_reloc_sym based probe is not for drivers */ - return add_module_to_probe_trace_events(tevs, ntevs, module); + ret = add_module_to_probe_trace_events(tevs, ntevs, module); + else + ret = post_process_kernel_probe_trace_events(tevs, ntevs); - return post_process_kernel_probe_trace_events(tevs, ntevs); + if (ret >= 0) + arch__post_process_probe_trace_events(pev, ntevs); + + return ret; } /* Try to find perf_probe_event with debuginfo */ @@ -758,7 +771,7 @@ static int 
try_to_find_probe_trace_events(struct perf_probe_event *pev, if (ntevs > 0) { /* Succeeded to find trace events */ pr_debug("Found %d probe_trace_events.\n", ntevs); - ret = post_process_probe_trace_events(*tevs, ntevs, + ret = post_process_probe_trace_events(pev, *tevs, ntevs, pev->target, pev->uprobes); if (ret < 0 || ret == ntevs) { clear_probe_trace_events(*tevs, ntevs); @@ -2945,8 +2958,6 @@ errout: return err; } -bool __weak arch__prefers_symtab(void) { return false; } - /* Concatinate two arrays */ static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b) { @@ -3167,12 +3178,6 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */ return ret == 0 ? -ENOENT : ret; /* Found in probe cache */ - if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) { - ret = find_probe_trace_events_from_map(pev, tevs); - if (ret > 0) - return ret; /* Found in symbol table */ - } - /* Convert perf_probe_event with debuginfo */ ret = try_to_find_probe_trace_events(pev, tevs); if (ret != 0) diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index e18ea9f..f4f45db 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -158,7 +158,6 @@ int show_line_range(struct line_range *lr, const char *module, bool user); int show_available_vars(struct perf_probe_event *pevs, int npevs, struct strfilter *filter); int show_available_funcs(const char *module, struct strfilter *filter, bool user); -bool arch__prefers_symtab(void); void arch__fix_tev_from_maps(struct perf_probe_event *pev, struct probe_trace_event *tev, struct map *map, struct symbol *sym); @@ -173,4 +172,9 @@ int e_snprintf(char *str, size_t size, const char *format, ...) int copy_to_probe_trace_arg(struct probe_trace_arg *tvar, struct perf_probe_arg *pvar); +struct map *get_target_map(const char *target, bool user); + +void arch__post_process_probe_trace_events(struct perf_probe_event *pev, + int ntevs); + #endif /*_PROBE_EVENT_H */ -- cgit v0.10.2 From d8734849d8007dacaa40b31ba7319ed28077141d Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 8 Aug 2016 15:24:02 +0200 Subject: rbd: nuke the 32-bit pool id check ceph_file_layout::pool_id is now s64. rbd_add_get_pool_id() and ceph_pg_poolid_by_name() both return an int, so it's bogus anyway. Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 07668a6..6c6519f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5337,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus, } spec->pool_id = (u64)rc; - /* The ceph file layout needs to fit pool id in 32 bits */ - - if (spec->pool_id > (u64)U32_MAX) { - rbd_warn(NULL, "pool id too large (%llu > %u)", - (unsigned long long)spec->pool_id, U32_MAX); - rc = -EIO; - goto err_out_client; - } - rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); if (!rbd_dev) { rc = -ENOMEM; -- cgit v0.10.2 From 4eacd4cb3a7c4794688ef4fad5d01f3a532a58e9 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 9 Aug 2016 16:12:09 +0200 Subject: ceph: initialize pathbase in the !dentry case in encode_caps_cb() pathbase is the base inode; set it to 0 if we've got no path. 
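The pattern behind the fix is general: when one code path fills a set of related variables, every branch has to assign all of them, or the caller ends up encoding stack garbage. A minimal userspace sketch of that shape (hypothetical names, not the ceph code):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for building a (path, pathlen, pathbase) triple. */
static void build_path(int have_dentry, const char **path,
		       size_t *pathlen, unsigned long *pathbase)
{
	if (have_dentry) {
		*path = "/some/name";
		*pathlen = strlen(*path);
		*pathbase = 0x1234;	/* base inode the path is relative to */
	} else {
		*path = NULL;
		*pathlen = 0;
		*pathbase = 0;		/* the assignment the fix adds */
	}
}

int main(void)
{
	const char *path;
	size_t pathlen;
	unsigned long pathbase;

	build_path(0, &path, &pathlen, &pathbase);
	/* Without the "= 0" above, pathbase would be whatever happened to
	 * be on the stack, yet would still be consumed by the caller. */
	printf("path=%s len=%zu base=%lu\n", path ? path : "(none)",
	       pathlen, pathbase);
	return 0;
}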
Coverity-id: 146348 Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fa59a85..f72d4ae 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2759,6 +2759,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, } else { path = NULL; pathlen = 0; + pathbase = 0; } spin_lock(&ci->i_ceph_lock); -- cgit v0.10.2 From 3ec60043f7c02e1f79e4a90045ff2d2e80042941 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 9 Aug 2016 16:23:17 +0100 Subject: iommu/dma: Don't put uninitialised IOVA domains Due to the limitations of having to wait until we see a device's DMA restrictions before we know how we want an IOVA domain initialised, there is a window for error if a DMA ops domain is allocated but later freed without ever being used. In that case, init_iova_domain() was never called, so calling put_iova_domain() from iommu_put_dma_cookie() ends up trying to take an uninitialised lock and crashing. Make things robust by skipping the call unless the IOVA domain actually has been initialised, as we probably should have done from the start. Fixes: 0db2e5d18f76 ("iommu: Implement common IOMMU ops for DMA mapping") Cc: stable@vger.kernel.org Reported-by: Nate Watterson Reviewed-by: Nate Watterson Tested-by: Nate Watterson Reviewed-by: Eric Auger Tested-by: Eric Auger Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 08a1e2f..7d991c8 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) if (!iovad) return; - put_iova_domain(iovad); + if (iovad->granule) + put_iova_domain(iovad); kfree(iovad); domain->iova_cookie = NULL; } -- cgit v0.10.2 From 17b963e319449f709e78dc1ef445d797a13eecbc Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 8 Aug 2016 13:06:41 +0100 Subject: rxrpc: Need to flag call as being released on connect failure If rxrpc_new_client_call() fails to make a connection, the call record that it allocated needs to be marked as RXRPC_CALL_RELEASED before it is passed to rxrpc_put_call() to indicate that it no longer has any attachment to the AF_RXRPC socket. Without this, an assertion failure may occur at: net/rxrpc/call_object:635 Signed-off-by: David Howells diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index e8c953c..ae057e0 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -275,6 +275,7 @@ error: list_del_init(&call->link); write_unlock_bh(&rxrpc_call_lock); + set_bit(RXRPC_CALL_RELEASED, &call->flags); call->state = RXRPC_CALL_DEAD; rxrpc_put_call(call); _leave(" = %d", ret); @@ -287,6 +288,7 @@ error: */ found_user_ID_now_present: write_unlock(&rx->call_lock); + set_bit(RXRPC_CALL_RELEASED, &call->flags); call->state = RXRPC_CALL_DEAD; rxrpc_put_call(call); _leave(" = -EEXIST [%p]", call); -- cgit v0.10.2 From f9dc575725baeaad8eb5262589f9aebeeaf13433 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 8 Aug 2016 10:27:26 +0100 Subject: rxrpc: Don't access connection from call if pointer is NULL The call state machine processor sets up the message parameters for a UDP message that it might need to transmit in advance on the basis that there's a very good chance it's going to have to transmit either an ACK or an ABORT. This requires it to look in the connection struct to retrieve some of the parameters. 
However, if the call is complete, the call connection pointer may be NULL to dissuade the processor from transmitting a message. However, there are some situations where the processor is still going to be called - and it's still going to set up message parameters whether it needs them or not. This results in a NULL pointer dereference at: net/rxrpc/call_event.c:837 To fix this, skip the message pre-initialisation if there's no connection attached. Signed-off-by: David Howells diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index f5e9916..e60cf65 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -837,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work) return; } + if (!call->conn) + goto skip_msg_init; + /* there's a good chance we're going to have to send a message, so set * one up in advance */ msg.msg_name = &call->conn->params.peer->srx.transport; @@ -859,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work) memset(iov, 0, sizeof(iov)); iov[0].iov_base = &whdr; iov[0].iov_len = sizeof(whdr); +skip_msg_init: /* deal with events of a final nature */ if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) { -- cgit v0.10.2 From 2e7e9758b2b680fa615d5cf70b7af416e96162d2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 9 Aug 2016 10:11:48 +0100 Subject: rxrpc: Once packet posted in data_ready, don't retry posting Once a packet has been posted to a connection in the data_ready handler, we mustn't try reposting if we then find that the connection is dying as the refcount has been given over to the dying connection and the packet might no longer exist. Losing the packet isn't a problem as the peer will retransmit. Signed-off-by: David Howells diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 9e0f58e..04afdc0 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -567,13 +567,13 @@ done: * post connection-level events to the connection * - this includes challenges, responses and some aborts */ -static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, +static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, struct sk_buff *skb) { _enter("%p,%p", conn, skb); skb_queue_tail(&conn->rx_queue, skb); - return rxrpc_queue_conn(conn); + rxrpc_queue_conn(conn); } /* @@ -694,7 +694,6 @@ void rxrpc_data_ready(struct sock *sk) rcu_read_lock(); -retry_find_conn: conn = rxrpc_find_connection_rcu(local, skb); if (!conn) goto cant_route_call; @@ -702,8 +701,7 @@ retry_find_conn: if (sp->hdr.callNumber == 0) { /* Connection-level packet */ _debug("CONN %p {%d}", conn, conn->debug_id); - if (!rxrpc_post_packet_to_conn(conn, skb)) - goto retry_find_conn; + rxrpc_post_packet_to_conn(conn, skb); } else { /* Call-bound packets are routed by connection channel. */ unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK; -- cgit v0.10.2 From 50fd85a1f94753b86a7f540794dfd4f799e81ac2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 9 Aug 2016 11:30:43 +0100 Subject: rxrpc: Fix a use-after-push in data_ready handler Fix a use of a packet after it has been enqueued onto the packet processing queue in the data_ready handler. Once on a call's Rx queue, we mustn't touch it any more as it may be dequeued and freed by the call processor running on a work queue. Save the values we need before enqueuing. 
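A stripped-down userspace sketch of that rule follows (invented types, not the rxrpc structures): once the object has been handed over it may be freed at any moment, so anything still needed afterwards is copied out first.

#include <stdio.h>
#include <stdlib.h>

struct packet {
	unsigned int serial;
	unsigned char flags;
};

/* Stand-in for queueing to another context; here the "consumer" frees the
 * packet immediately, which is the worst case the fix guards against. */
static void enqueue_for_consumer(struct packet *pkt)
{
	free(pkt);
}

int main(void)
{
	struct packet *pkt = malloc(sizeof(*pkt));

	if (!pkt)
		return 1;
	pkt->serial = 42;
	pkt->flags = 0x20;

	/* Snapshot what the ACK path needs *before* publishing the packet. */
	unsigned int serial = pkt->serial;
	unsigned char flags = pkt->flags;

	enqueue_for_consumer(pkt);
	pkt = NULL;	/* from here on, only the saved copies may be used */

	printf("proposing ACK for serial %u (flags 0x%x)\n", serial, flags);
	return 0;
}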
Without this, we can get an oops like the following: BUG: unable to handle kernel NULL pointer dereference at 000000000000009c IP: [] rxrpc_fast_process_packet+0x724/0xa11 [af_rxrpc] PGD 0 Oops: 0000 [#1] SMP Modules linked in: kafs(E) af_rxrpc(E) [last unloaded: af_rxrpc] CPU: 2 PID: 0 Comm: swapper/2 Tainted: G E 4.7.0-fsdevel+ #1336 Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014 task: ffff88040d6863c0 task.stack: ffff88040d68c000 RIP: 0010:[] [] rxrpc_fast_process_packet+0x724/0xa11 [af_rxrpc] RSP: 0018:ffff88041fb03a78 EFLAGS: 00010246 RAX: ffffffffffffffff RBX: ffff8803ff195b00 RCX: 0000000000000001 RDX: ffffffffa01854d1 RSI: 0000000000000008 RDI: ffff8803ff195b00 RBP: ffff88041fb03ab0 R08: 0000000000000000 R09: 0000000000000001 R10: ffff88041fb038c8 R11: 0000000000000000 R12: ffff880406874800 R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88041fb00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000000000009c CR3: 0000000001c14000 CR4: 00000000001406e0 Stack: ffff8803ff195ea0 ffff880408348800 ffff880406874800 ffff8803ff195b00 ffff880408348800 ffff8803ff195ed8 0000000000000000 ffff88041fb03af0 ffffffffa0186072 0000000000000000 ffff8804054da000 0000000000000000 Call Trace: [] rxrpc_data_ready+0x89d/0xbae [af_rxrpc] [] __sock_queue_rcv_skb+0x24c/0x2b2 [] __udp_queue_rcv_skb+0x4b/0x1bd [] udp_queue_rcv_skb+0x281/0x4db [] __udp4_lib_rcv+0x7ed/0x963 [] udp_rcv+0x15/0x17 [] ip_local_deliver_finish+0x1c3/0x318 [] ip_local_deliver+0xbb/0xc4 [] ? inet_del_offload+0x40/0x40 [] ip_rcv_finish+0x3ce/0x42c [] ip_rcv+0x304/0x33d [] ? ip_local_deliver_finish+0x318/0x318 [] __netif_receive_skb_core+0x601/0x6e8 [] __netif_receive_skb+0x13/0x54 [] netif_receive_skb_internal+0xbb/0x17c [] napi_gro_receive+0xf9/0x1bd [] rtl8169_poll+0x32b/0x4a8 [] net_rx_action+0xe8/0x357 [] __do_softirq+0x1aa/0x414 [] irq_exit+0x3d/0xb0 [] do_IRQ+0xe4/0xfc [] common_interrupt+0x93/0x93 [] ? cpuidle_enter_state+0x1ad/0x2be [] ? 
cpuidle_enter_state+0x1a8/0x2be [] cpuidle_enter+0x12/0x14 [] call_cpuidle+0x39/0x3b [] cpu_startup_entry+0x230/0x35d [] start_secondary+0xf4/0xf7 Signed-off-by: David Howells diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 04afdc0..7897190 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -124,11 +124,15 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, struct rxrpc_skb_priv *sp; bool terminal; int ret, ackbit, ack; + u32 serial; + u8 flags; _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); sp = rxrpc_skb(skb); ASSERTCMP(sp->call, ==, NULL); + flags = sp->hdr.flags; + serial = sp->hdr.serial; spin_lock(&call->lock); @@ -192,8 +196,8 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, sp->call = call; rxrpc_get_call(call); atomic_inc(&call->skb_count); - terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && - !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); + terminal = ((flags & RXRPC_LAST_PACKET) && + !(flags & RXRPC_CLIENT_INITIATED)); ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); if (ret < 0) { if (ret == -ENOMEM || ret == -ENOBUFS) { @@ -205,12 +209,13 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, } skb = NULL; + sp = NULL; _debug("post #%u", seq); ASSERTCMP(call->rx_data_post, ==, seq); call->rx_data_post++; - if (sp->hdr.flags & RXRPC_LAST_PACKET) + if (flags & RXRPC_LAST_PACKET) set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); /* if we've reached an out of sequence packet then we need to drain @@ -226,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, spin_unlock(&call->lock); atomic_inc(&call->ackr_not_idle); - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false); + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false); _leave(" = 0 [posted]"); return 0; @@ -239,7 +244,7 @@ out: discard_and_ack: _debug("discard and ACK packet %p", skb); - __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); + __rxrpc_propose_ACK(call, ack, serial, true); discard: spin_unlock(&call->lock); rxrpc_free_skb(skb); @@ -247,7 +252,7 @@ discard: return 0; enqueue_and_ack: - __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); + __rxrpc_propose_ACK(call, ack, serial, true); enqueue_packet: _net("defer skb %p", skb); spin_unlock(&call->lock); -- cgit v0.10.2 From 992c273af9d9f12a2e470731f69bb3baa694562b Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 9 Aug 2016 16:58:42 +0100 Subject: rxrpc: Free packets discarded in data_ready Under certain conditions, the data_ready handler will discard a packet. These need to be freed. Signed-off-by: David Howells diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 7897190..70bb778 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -744,6 +744,8 @@ cant_route_call: if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { _debug("reject type %d",sp->hdr.type); rxrpc_reject_packet(local, skb); + } else { + rxrpc_free_skb(skb); } _leave(" [no call]"); return; -- cgit v0.10.2 From a026bb12cc57d758e045126a252e12e868076cb4 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 3 Aug 2016 18:08:55 +0100 Subject: drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock arm_pmu_mutex is never held long and we don't want to sleep while the lock is being held as it's executed in the context of hotplug notifiers. So it can be converted to a simple spinlock instead. 
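As a rough userspace analogue (pthread primitives standing in for the kernel ones, and an invented counter rather than the real PMU list), a spinlock fits exactly because the critical section is short and never blocks while it is held:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t list_lock;
static int registered_pmus;

/* Short, non-blocking critical section: the only kind a spinlock (or a
 * hotplug notifier running in atomic context) should ever see. */
static void *register_pmu(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_spin_lock(&list_lock);
		registered_pmus++;	/* never sleep while this is held */
		pthread_spin_unlock(&list_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, register_pmu, NULL);
	pthread_create(&b, NULL, register_pmu, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("count = %d\n", registered_pmus);	/* expect 200000 */
	pthread_spin_destroy(&list_lock);
	return 0;
}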
Without this patch we get the following warning: BUG: sleeping function called from invalid context at kernel/locking/mutex.c:620 in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/2 no locks held by swapper/2/0. irq event stamp: 381314 hardirqs last enabled at (381313): _raw_spin_unlock_irqrestore+0x7c/0x88 hardirqs last disabled at (381314): cpu_die+0x28/0x48 softirqs last enabled at (381294): _local_bh_enable+0x28/0x50 softirqs last disabled at (381293): irq_enter+0x58/0x78 CPU: 2 PID: 0 Comm: swapper/2 Not tainted 4.7.0 #12 Call trace: dump_backtrace+0x0/0x220 show_stack+0x24/0x30 dump_stack+0xb4/0xf0 ___might_sleep+0x1d8/0x1f0 __might_sleep+0x5c/0x98 mutex_lock_nested+0x54/0x400 arm_perf_starting_cpu+0x34/0xb0 cpuhp_invoke_callback+0x88/0x3d8 notify_cpu_starting+0x78/0x98 secondary_start_kernel+0x108/0x1a8 This patch converts the mutex to spinlock to eliminate the above warnings. This constraints pmu->reset to be non-blocking call which is the case with all the ARM PMU backends. Cc: Stephen Boyd Fixes: 37b502f121ad ("arm/perf: Fix hotplug state machine conversion") Acked-by: Mark Rutland Signed-off-by: Sudeep Holla Signed-off-by: Will Deacon diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 6ccb994..4c9a537 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return 0; } -static DEFINE_MUTEX(arm_pmu_mutex); +static DEFINE_SPINLOCK(arm_pmu_lock); static LIST_HEAD(arm_pmu_list); /* @@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu) { struct arm_pmu *pmu; - mutex_lock(&arm_pmu_mutex); + spin_lock(&arm_pmu_lock); list_for_each_entry(pmu, &arm_pmu_list, entry) { if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) @@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu) if (pmu->reset) pmu->reset(pmu); } - mutex_unlock(&arm_pmu_mutex); + spin_unlock(&arm_pmu_lock); return 0; } @@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) if (!cpu_hw_events) return -ENOMEM; - mutex_lock(&arm_pmu_mutex); + spin_lock(&arm_pmu_lock); list_add_tail(&cpu_pmu->entry, &arm_pmu_list); - mutex_unlock(&arm_pmu_mutex); + spin_unlock(&arm_pmu_lock); err = cpu_pm_pmu_register(cpu_pmu); if (err) @@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) return 0; out_unregister: - mutex_lock(&arm_pmu_mutex); + spin_lock(&arm_pmu_lock); list_del(&cpu_pmu->entry); - mutex_unlock(&arm_pmu_mutex); + spin_unlock(&arm_pmu_lock); free_percpu(cpu_hw_events); return err; } @@ -869,9 +869,9 @@ out_unregister: static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) { cpu_pm_pmu_unregister(cpu_pmu); - mutex_lock(&arm_pmu_mutex); + spin_lock(&arm_pmu_lock); list_del(&cpu_pmu->entry); - mutex_unlock(&arm_pmu_mutex); + spin_unlock(&arm_pmu_lock); free_percpu(cpu_pmu->hw_events); } -- cgit v0.10.2 From 7f1d642fbb5c356519617c24757a0cbed7f800a8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 19 Jul 2016 15:39:02 +0100 Subject: drivers/perf: arm-pmu: Fix handling of SPI lacking "interrupt-affinity" property Patch 19a469a58720 ("drivers/perf: arm-pmu: Handle per-interrupt affinity mask") added support for partitionned PPI setups, but inadvertently broke setups using SPIs without the "interrupt-affinity" property (which is the case for UP platforms). 
This patch restore the broken functionnality by testing whether the interrupt is percpu or not instead of relying on the using_spi flag that really means "SPI *and* interrupt-affinity property". Acked-by: Mark Rutland Reported-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Fixes: 19a469a58720 ("drivers/perf: arm-pmu: Handle per-interrupt affinity mask") Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 4c9a537..c494613 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -967,11 +967,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) /* If we didn't manage to parse anything, try the interrupt affinity */ if (cpumask_weight(&pmu->supported_cpus) == 0) { - if (!using_spi) { + int irq = platform_get_irq(pdev, 0); + + if (irq_is_percpu(irq)) { /* If using PPIs, check the affinity of the partition */ - int ret, irq; + int ret; - irq = platform_get_irq(pdev, 0); ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); if (ret) { kfree(irqs); -- cgit v0.10.2 From c4159a75b64c0e67caededf4d7372c1b58a5f42a Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Mon, 8 Aug 2016 23:03:12 +0300 Subject: mm: memcontrol: only mark charged pages with PageKmemcg To distinguish non-slab pages charged to kmemcg we mark them PageKmemcg, which sets page->_mapcount to -512. Currently, we set/clear PageKmemcg in __alloc_pages_nodemask()/free_pages_prepare() for any page allocated with __GFP_ACCOUNT, including those that aren't actually charged to any cgroup, i.e. allocated from the root cgroup context. To avoid overhead in case cgroups are not used, we only do that if memcg_kmem_enabled() is true. The latter is set iff there are kmem-enabled memory cgroups (online or offline). The root cgroup is not considered kmem-enabled. As a result, if a page is allocated with __GFP_ACCOUNT for the root cgroup when there are kmem-enabled memory cgroups and is freed after all kmem-enabled memory cgroups were removed, e.g. # no memory cgroups has been created yet, create one mkdir /sys/fs/cgroup/memory/test # run something allocating pages with __GFP_ACCOUNT, e.g. # a program using pipe dmesg | tail # remove the memory cgroup rmdir /sys/fs/cgroup/memory/test we'll get bad page state bug complaining about page->_mapcount != -1: BUG: Bad page state in process swapper/0 pfn:1fd945c page:ffffea007f651700 count:0 mapcount:-511 mapping: (null) index:0x0 flags: 0x1000000000000000() To avoid that, let's mark with PageKmemcg only those pages that are actually charged to and hence pin a non-root memory cgroup. 
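Reduced to its essence (hypothetical helpers, not the real memcg API), the rule the patch enforces is that the marker is set only when a charge actually succeeded against a non-root group, and cleared at the same place the charge is undone:

#include <stdbool.h>
#include <stdio.h>

struct page { bool kmemcg; };

static bool cgroups_in_use;	/* stand-in for memcg_kmem_enabled() */
static bool in_root_cgroup;	/* stand-in for mem_cgroup_is_root() */

static int charge(struct page *page)
{
	if (!cgroups_in_use || in_root_cgroup)
		return 0;		/* nothing charged, nothing marked */
	page->kmemcg = true;		/* mark only pages that pin a group */
	return 0;
}

static void uncharge(struct page *page)
{
	if (!page->kmemcg)
		return;			/* was never charged to a group */
	page->kmemcg = false;		/* clear where the charge is undone */
}

int main(void)
{
	struct page p = { false };

	cgroups_in_use = true;
	in_root_cgroup = true;		/* root allocation with __GFP_ACCOUNT */
	charge(&p);
	uncharge(&p);
	printf("flag still clear at free time: %s\n", p.kmemcg ? "no" : "yes");
	return 0;
}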
Fixes: 4949148ad433 ("mm: charge/uncharge kmemcg from generic page allocator paths") Reported-and-tested-by: Eric Dumazet Signed-off-by: Vladimir Davydov Signed-off-by: Linus Torvalds diff --git a/fs/pipe.c b/fs/pipe.c index 4b32928..4ebe6b2 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -144,10 +144,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, struct page *page = buf->page; if (page_count(page) == 1) { - if (memcg_kmem_enabled()) { + if (memcg_kmem_enabled()) memcg_kmem_uncharge(page, 0); - __ClearPageKmemcg(page); - } __SetPageLocked(page); return 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 66beca1..e74d708 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2337,8 +2337,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) return 0; memcg = get_mem_cgroup_from_mm(current->mm); - if (!mem_cgroup_is_root(memcg)) + if (!mem_cgroup_is_root(memcg)) { ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); + if (!ret) + __SetPageKmemcg(page); + } css_put(&memcg->css); return ret; } @@ -2365,6 +2368,11 @@ void memcg_kmem_uncharge(struct page *page, int order) page_counter_uncharge(&memcg->memsw, nr_pages); page->mem_cgroup = NULL; + + /* slab pages do not have PageKmemcg flag set */ + if (PageKmemcg(page)) + __ClearPageKmemcg(page); + css_put_many(&memcg->css, nr_pages); } #endif /* !CONFIG_SLOB */ @@ -5537,8 +5545,10 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; pgpgout++; - } else + } else { nr_kmem += 1 << compound_order(page); + __ClearPageKmemcg(page); + } page->mem_cgroup = NULL; } while (next != page_list); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fb975ce..ee744fa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1008,10 +1008,8 @@ static __always_inline bool free_pages_prepare(struct page *page, } if (PageMappingFlags(page)) page->mapping = NULL; - if (memcg_kmem_enabled() && PageKmemcg(page)) { + if (memcg_kmem_enabled() && PageKmemcg(page)) memcg_kmem_uncharge(page, order); - __ClearPageKmemcg(page); - } if (check_free) bad += free_pages_check(page); if (bad) @@ -3756,12 +3754,10 @@ no_zone: } out: - if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) { - if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) { - __free_pages(page, order); - page = NULL; - } else - __SetPageKmemcg(page); + if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && + unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { + __free_pages(page, order); + page = NULL; } if (kmemcheck_enabled && page) -- cgit v0.10.2 From a3d1ddd932bc86f0f7027079754ab0aefd259109 Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 8 Aug 2016 17:53:12 -0500 Subject: ipr: Fix sync scsi scan Commit b195d5e2bffd ("ipr: Wait to do async scan until scsi host is initialized") fixed async scan for ipr, but broke sync scan for ipr. This fixes sync scan back up. 
Signed-off-by: Brian King Reported-and-tested-by: Michael Ellerman Signed-off-by: Linus Torvalds diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index bf85974..17d04c7 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -10410,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) __ipr_remove(pdev); return rc; } + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + ioa_cfg->scan_enabled = 1; + schedule_work(&ioa_cfg->work_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); - scsi_scan_host(ioa_cfg->host); ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { @@ -10421,10 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) } } - spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - ioa_cfg->scan_enabled = 1; - schedule_work(&ioa_cfg->work_q); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + scsi_scan_host(ioa_cfg->host); + return 0; } -- cgit v0.10.2 From beab47d55b883e85bec7771a899ac791c6f92c80 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Tue, 19 Jul 2016 22:42:22 +0200 Subject: of: Delete an unnecessary check before the function call "of_node_put" The of_node_put() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Rob Herring diff --git a/drivers/of/base.c b/drivers/of/base.c index 7792266..cb255a0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np, */ err: - if (it.node) - of_node_put(it.node); + of_node_put(it.node); return rc; } -- cgit v0.10.2 From 89c67752ae4dedc1244344b266c837ddb4053ebd Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 1 Aug 2016 17:17:53 +1000 Subject: drivers/of: Validate device node in __unflatten_device_tree() @mynodes is set to NULL when __unflatten_device_tree() is called to unflatten device sub-tree in PCI hot add scenario on PowerPC PowerNV platform. Marking @mynodes detached unconditionally causes kernel crash as below backtrace shows: Unable to handle kernel paging request for data at address 0x00000000 Faulting instruction address: 0xc000000000b26f64 cpu 0x0: Vector: 300 (Data Access) at [c000003fcc7cf740] pc: c000000000b26f64: __unflatten_device_tree+0xf4/0x190 lr: c000000000b26f40: __unflatten_device_tree+0xd0/0x190 sp: c000003fcc7cf9c0 msr: 900000000280b033 dar: 0 dsisr: 40000000 current = 0xc000003fcc281680 paca = 0xc00000000ff00000 softe: 0 irq_happened: 0x01 pid = 2724, comm = sh Linux version 4.7.0-gavin-07754-g92a6836 (gwshan@gwshan) (gcc version \ 4.9.3 (Buildroot 2016.02-rc2-00093-g5ea3bce) ) #539 SMP Mon Aug 1 \ 12:40:29 AEST 2016 enter ? 
for help [c000003fcc7cfa50] c000000000b27060 of_fdt_unflatten_tree+0x60/0x90 [c000003fcc7cfaa0] c0000000004c6288 pnv_php_set_slot_power_state+0x118/0x440 [c000003fcc7cfb80] c0000000004c6a10 pnv_php_enable+0xc0/0x170 [c000003fcc7cfbd0] c0000000004c4d80 power_write_file+0xa0/0x190 [c000003fcc7cfc50] c0000000004be93c pci_slot_attr_store+0x3c/0x60 [c000003fcc7cfc70] c0000000002d3fd4 sysfs_kf_write+0x94/0xc0 [c000003fcc7cfcb0] c0000000002d2c30 kernfs_fop_write+0x180/0x260 [c000003fcc7cfd00] c000000000230fe0 __vfs_write+0x40/0x190 [c000003fcc7cfd90] c000000000232278 vfs_write+0xc8/0x240 [c000003fcc7cfde0] c000000000233d90 SyS_write+0x60/0x110 [c000003fcc7cfe30] c000000000009524 system_call+0x38/0x108 This avoids the kernel crash by marking @mynodes detached only when @mynodes is dereferencing valid device node in __unflatten_device_tree(). Fixes: 1d1bde550ea3 ("of: fdt: mark unflattened tree as detached") Reported-by: Meng Li Signed-off-by: Gavin Shan Signed-off-by: Rob Herring diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 55f1b83..085c638 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob, pr_warning("End of tree marker overwritten: %08x\n", be32_to_cpup(mem + size)); - if (detached) { + if (detached && mynodes) { of_node_set_flag(*mynodes, OF_DETACHED); pr_debug("unflattened tree is detached\n"); } -- cgit v0.10.2 From e55aeb6ba4e8cc3549bff1e75ea1d029324bce21 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 9 Aug 2016 16:18:51 +0200 Subject: of/irq: Mark interrupt controllers as populated before initialisation That way the init callback may clear the flag again, in case of drivers split between early irq chip and a normal platform driver. Fixes: 15cc2ed6dcf9 ("of/irq: Mark initialised interrupt controllers as populated") Suggested-by: Rob Herring Signed-off-by: Philipp Zabel Acked-by: Jon Hunter Signed-off-by: Rob Herring diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 89a71c6..a2e68f7 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches) list_del(&desc->list); + of_node_set_flag(desc->dev, OF_POPULATED); + pr_debug("of_irq_init: init %s (%p), parent %p\n", desc->dev->full_name, desc->dev, desc->interrupt_parent); ret = desc->irq_init_cb(desc->dev, desc->interrupt_parent); if (ret) { + of_node_clear_flag(desc->dev, OF_POPULATED); kfree(desc); continue; } @@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches) * its children can get processed in a subsequent pass. */ list_add_tail(&desc->list, &intc_parent_list); - - of_node_set_flag(desc->dev, OF_POPULATED); } /* Get the next pending parent that might have children */ -- cgit v0.10.2 From 255c0397bc402e3fcc8833c214f49b2108f04d1a Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 9 Aug 2016 16:18:52 +0200 Subject: ARM: imx6: mark GPC node as not populated after irq init to probe pm domain driver Since IRQCHIP_DECLARE now flags the GPC node as already populated, the GPC power domain driver is never probed unless we clear the flag again. 
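The underlying pattern can be sketched as follows; the names are invented, and only IRQCHIP_DECLARE(), of_node_clear_flag() and OF_POPULATED are real interfaces. An irqchip init callback that shares its device-tree node with a platform driver clears OF_POPULATED so the platform device is still created later:

#include <linux/irqchip.h>
#include <linux/of.h>

static int __init example_gpc_like_init(struct device_node *node,
					struct device_node *parent)
{
	/* ... early irqchip setup using "node" would go here ... */

	/* Undo the OF_POPULATED marking done by of_irq_init() so that a
	 * platform driver bound to the same node can probe afterwards. */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}
IRQCHIP_DECLARE(example_gpc_like, "vendor,example-gpc", example_gpc_like_init);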
Fixes: 15cc2ed6dcf9 ("of/irq: Mark initialised interrupt controllers as populated") Suggested-by: Rob Herring Signed-off-by: Philipp Zabel Cc: Jon Hunter Signed-off-by: Rob Herring diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index fd87205..0df062d 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node, for (i = 0; i < IMR_NUM; i++) writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4); + /* + * Clear the OF_POPULATED flag set in of_irq_init so that + * later the GPC power domain driver will not be skipped. + */ + of_node_clear_flag(node, OF_POPULATED); + return 0; } IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init); -- cgit v0.10.2 From a0cba2179ea4c1820fce2ee046b6ed90ecc56196 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 9 Aug 2016 10:48:18 -0700 Subject: Revert "printk: create pr_ functions" This reverts commit 874f9c7da9a4acbc1b9e12ca722579fb50e4d142. Geert Uytterhoeven reports: "This change seems to have an (unintendent?) side-effect. Before, pr_*() calls without a trailing newline characters would be printed with a newline character appended, both on the console and in the output of the dmesg command. After this commit, no new line character is appended, and the output of the next pr_*() call of the same type may be appended, like in: - Truncating RAM at 0x0000000040000000-0x00000000c0000000 to -0x0000000070000000 - Ignoring RAM at 0x0000000200000000-0x0000000240000000 (!CONFIG_HIGHMEM) + Truncating RAM at 0x0000000040000000-0x00000000c0000000 to -0x0000000070000000Ignoring RAM at 0x0000000200000000-0x0000000240000000 (!CONFIG_HIGHMEM)" Joe Perches says: "No, that is not intentional. The newline handling code inside vprintk_emit is a bit involved and for now I suggest a revert until this has all the same behavior as earlier" Reported-by: Geert Uytterhoeven Requested-by: Joe Perches Cc: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/include/linux/printk.h b/include/linux/printk.h index 8dc155d..696a56b 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -266,39 +266,21 @@ extern asmlinkage void dump_stack(void) __cold; * and other debug macros are compiled out unless either DEBUG is defined * or CONFIG_DYNAMIC_DEBUG is set. */ - -#ifdef CONFIG_PRINTK - -asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...); -asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...); -asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...); -asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...); -asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...); -asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...); -asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...); - -#define pr_emerg(fmt, ...) __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__) -#define pr_alert(fmt, ...) __pr_alert(pr_fmt(fmt), ##__VA_ARGS__) -#define pr_crit(fmt, ...) __pr_crit(pr_fmt(fmt), ##__VA_ARGS__) -#define pr_err(fmt, ...) __pr_err(pr_fmt(fmt), ##__VA_ARGS__) -#define pr_warn(fmt, ...) __pr_warn(pr_fmt(fmt), ##__VA_ARGS__) -#define pr_notice(fmt, ...) __pr_notice(pr_fmt(fmt), ##__VA_ARGS__) -#define pr_info(fmt, ...) __pr_info(pr_fmt(fmt), ##__VA_ARGS__) - -#else - -#define pr_emerg(fmt, ...) printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) -#define pr_alert(fmt, ...) printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) -#define pr_crit(fmt, ...) 
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) -#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) -#define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) -#define pr_notice(fmt, ...) printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) -#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) - -#endif - -#define pr_warning pr_warn - +#define pr_emerg(fmt, ...) \ + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert(fmt, ...) \ + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit(fmt, ...) \ + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warning(fmt, ...) \ + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn pr_warning +#define pr_notice(fmt, ...) \ + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info(fmt, ...) \ + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /* * Like KERN_CONT, pr_cont() should only be used when continuing * a line with no newline ('\n') enclosed. Otherwise it defaults diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index 5d4505f..7fd2838 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h @@ -16,11 +16,9 @@ */ #include -typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt, - va_list args); +typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); -__printf(2, 0) -int vprintk_default(int level, const char *fmt, va_list args); +int __printf(1, 0) vprintk_default(const char *fmt, va_list args); #ifdef CONFIG_PRINTK_NMI @@ -33,10 +31,9 @@ extern raw_spinlock_t logbuf_lock; * via per-CPU variable. */ DECLARE_PER_CPU(printk_func_t, printk_func); -__printf(2, 0) -static inline int vprintk_func(int level, const char *fmt, va_list args) +static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { - return this_cpu_read(printk_func)(level, fmt, args); + return this_cpu_read(printk_func)(fmt, args); } extern atomic_t nmi_message_lost; @@ -47,10 +44,9 @@ static inline int get_nmi_message_lost(void) #else /* CONFIG_PRINTK_NMI */ -__printf(2, 0) -static inline int vprintk_func(int level, const char *fmt, va_list args) +static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { - return vprintk_default(level, fmt, args); + return vprintk_default(fmt, args); } static inline int get_nmi_message_lost(void) diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c index bc3eeb1..b69eb8a 100644 --- a/kernel/printk/nmi.c +++ b/kernel/printk/nmi.c @@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq); * one writer running. But the buffer might get flushed from another * CPU, so we need to be careful. */ -static int vprintk_nmi(int level, const char *fmt, va_list args) +static int vprintk_nmi(const char *fmt, va_list args) { struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); int add = 0; @@ -79,16 +79,7 @@ again: if (!len) smp_rmb(); - if (level != LOGLEVEL_DEFAULT) { - add = snprintf(s->buffer + len, sizeof(s->buffer) - len, - KERN_SOH "%c", '0' + level); - add += vsnprintf(s->buffer + len + add, - sizeof(s->buffer) - len - add, - fmt, args); - } else { - add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, - fmt, args); - } + add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args); /* * Do it once again if the buffer has been flushed in the meantime. 
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a37fc8c..eea6dbc 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1930,26 +1930,7 @@ asmlinkage int printk_emit(int facility, int level, } EXPORT_SYMBOL(printk_emit); -#define define_pr_level(func, loglevel) \ -asmlinkage __visible void func(const char *fmt, ...) \ -{ \ - va_list args; \ - \ - va_start(args, fmt); \ - vprintk_default(loglevel, fmt, args); \ - va_end(args); \ -} \ -EXPORT_SYMBOL(func) - -define_pr_level(__pr_emerg, LOGLEVEL_EMERG); -define_pr_level(__pr_alert, LOGLEVEL_ALERT); -define_pr_level(__pr_crit, LOGLEVEL_CRIT); -define_pr_level(__pr_err, LOGLEVEL_ERR); -define_pr_level(__pr_warn, LOGLEVEL_WARNING); -define_pr_level(__pr_notice, LOGLEVEL_NOTICE); -define_pr_level(__pr_info, LOGLEVEL_INFO); - -int vprintk_default(int level, const char *fmt, va_list args) +int vprintk_default(const char *fmt, va_list args) { int r; @@ -1959,7 +1940,7 @@ int vprintk_default(int level, const char *fmt, va_list args) return r; } #endif - r = vprintk_emit(0, level, NULL, 0, fmt, args); + r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); return r; } @@ -1992,7 +1973,7 @@ asmlinkage __visible int printk(const char *fmt, ...) int r; va_start(args, fmt); - r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args); + r = vprintk_func(fmt, args); va_end(args); return r; -- cgit v0.10.2 From a5d0dc810abf3d6b241777467ee1d6efb02575fc Mon Sep 17 00:00:00 2001 From: Lance Richardson Date: Tue, 9 Aug 2016 15:29:42 -0400 Subject: vti: flush x-netns xfrm cache when vti interface is removed When executing the script included below, the netns delete operation hangs with the following message (repeated at 10 second intervals): kernel:unregister_netdevice: waiting for lo to become free. Usage count = 1 This occurs because a reference to the lo interface in the "secure" netns is still held by a dst entry in the xfrm bundle cache in the init netns. Address this problem by garbage collecting the tunnel netns flow cache when a cross-namespace vti interface receives a NETDEV_DOWN notification. A more detailed description of the problem scenario (referencing commands in the script below): (1) ip link add vti_test type vti local 1.1.1.1 remote 1.1.1.2 key 1 The vti_test interface is created in the init namespace. vti_tunnel_init() attaches a struct ip_tunnel to the vti interface's netdev_priv(dev), setting the tunnel net to &init_net. (2) ip link set vti_test netns secure The vti_test interface is moved to the "secure" netns. Note that the associated struct ip_tunnel still has tunnel->net set to &init_net. (3) ip netns exec secure ping -c 4 -i 0.02 -I 192.168.100.1 192.168.200.1 The first packet sent using the vti device causes xfrm_lookup() to be called as follows: dst = xfrm_lookup(tunnel->net, skb_dst(skb), fl, NULL, 0); Note that tunnel->net is the init namespace, while skb_dst(skb) references the vti_test interface in the "secure" namespace. The returned dst references an interface in the init namespace. Also note that the first parameter to xfrm_lookup() determines which flow cache is used to store the computed xfrm bundle, so after xfrm_lookup() returns there will be a cached bundle in the init namespace flow cache with a dst referencing a device in the "secure" namespace. (4) ip netns del secure Kernel begins to delete the "secure" namespace. 
At some point the vti_test interface is deleted, at which point dst_ifdown() changes the dst->dev in the cached xfrm bundle flow from vti_test to lo (still in the "secure" namespace however). Since nothing has happened to cause the init namespace's flow cache to be garbage collected, this dst remains attached to the flow cache, so the kernel loops waiting for the last reference to lo to go away. ip link add br1 type bridge ip link set dev br1 up ip addr add dev br1 1.1.1.1/8 ip netns add secure ip link add vti_test type vti local 1.1.1.1 remote 1.1.1.2 key 1 ip link set vti_test netns secure ip netns exec secure ip link set vti_test up ip netns exec secure ip link s lo up ip netns exec secure ip addr add dev lo 192.168.100.1/24 ip netns exec secure ip route add 192.168.200.0/24 dev vti_test ip xfrm policy flush ip xfrm state flush ip xfrm policy add dir out tmpl src 1.1.1.1 dst 1.1.1.2 \ proto esp mode tunnel mark 1 ip xfrm policy add dir in tmpl src 1.1.1.2 dst 1.1.1.1 \ proto esp mode tunnel mark 1 ip xfrm state add src 1.1.1.1 dst 1.1.1.2 proto esp spi 1 \ mode tunnel enc des3_ede 0x112233445566778811223344556677881122334455667788 ip xfrm state add src 1.1.1.2 dst 1.1.1.1 proto esp spi 1 \ mode tunnel enc des3_ede 0x112233445566778811223344556677881122334455667788 ip netns exec secure ping -c 4 -i 0.02 -I 192.168.100.1 192.168.200.1 ip netns del secure Reported-by: Hangbin Liu Reported-by: Jan Tluka Signed-off-by: Lance Richardson Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index a917903..cc701fa 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { .get_link_net = ip_tunnel_get_link_net, }; +static bool is_vti_tunnel(const struct net_device *dev) +{ + return dev->netdev_ops == &vti_netdev_ops; +} + +static int vti_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ip_tunnel *tunnel = netdev_priv(dev); + + if (!is_vti_tunnel(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_DOWN: + if (!net_eq(tunnel->net, dev_net(dev))) + xfrm_garbage_collect(tunnel->net); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block vti_notifier_block __read_mostly = { + .notifier_call = vti_device_event, +}; + static int __init vti_init(void) { const char *msg; @@ -564,6 +591,8 @@ static int __init vti_init(void) pr_info("IPv4 over IPsec tunneling driver\n"); + register_netdevice_notifier(&vti_notifier_block); + msg = "tunnel device"; err = register_pernet_device(&vti_net_ops); if (err < 0) @@ -596,6 +625,7 @@ xfrm_proto_ah_failed: xfrm_proto_esp_failed: unregister_pernet_device(&vti_net_ops); pernet_dev_failed: + unregister_netdevice_notifier(&vti_notifier_block); pr_err("vti init: failed to register %s\n", msg); return err; } @@ -607,6 +637,7 @@ static void __exit vti_fini(void) xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); unregister_pernet_device(&vti_net_ops); + unregister_netdevice_notifier(&vti_notifier_block); } module_init(vti_init); -- cgit v0.10.2 From 64827adc9e14c8d17cf5f3d5a9ee47a42e95dd8c Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 28 Jul 2016 17:20:32 +0800 Subject: drm/amdgpu: fix vm init error path Signed-off-by: Chunming Zhou Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 
8e642fc..80120fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amd_sched_entity_init(&ring->sched, &vm->entity, rq, amdgpu_sched_jobs); if (r) - return r; + goto err; vm->page_directory_fence = NULL; @@ -1565,6 +1565,9 @@ error_free_page_directory: error_free_sched_entity: amd_sched_entity_fini(&ring->sched, &vm->entity); +err: + drm_free_large(vm->page_tables); + return r; } -- cgit v0.10.2 From 254a49d5139a70828d652ef4faec40763993e403 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 9 Aug 2016 15:09:44 +0300 Subject: drivers: net: cpsw: fix kmemleak false-positive reports for sk buffers Kmemleak reports following false positive memory leaks for each sk buffers allocated by CPSW (__netdev_alloc_skb_ip_align()) in cpsw_ndo_open() and cpsw_rx_handler(): unreferenced object 0xea915000 (size 2048): comm "systemd-network", pid 713, jiffies 4294938323 (age 102.180s) hex dump (first 32 bytes): 00 58 91 ea ff ff ff ff ff ff ff ff ff ff ff ff .X.............. ff ff ff ff ff ff fd 0f 00 00 00 00 00 00 00 00 ................ backtrace: [] __kmalloc_track_caller+0x1a4/0x230 [] __alloc_skb+0x68/0x16c [] __netdev_alloc_skb+0x40/0x104 [] cpsw_ndo_open+0x374/0x670 [ti_cpsw] [] __dev_open+0xb0/0x114 [] __dev_change_flags+0x9c/0x14c [] dev_change_flags+0x20/0x50 [] do_setlink+0x2cc/0x78c [] rtnl_setlink+0xcc/0x100 [] rtnetlink_rcv_msg+0x184/0x224 [] netlink_rcv_skb+0xa8/0xc4 [] rtnetlink_rcv+0x2c/0x34 [] netlink_unicast+0x16c/0x1f8 [] netlink_sendmsg+0x334/0x348 [] sock_sendmsg+0x1c/0x2c [] SyS_sendto+0xc0/0xe8 unreferenced object 0xec861780 (size 192): comm "softirq", pid 0, jiffies 4294938759 (age 109.540s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 00 00 00 00 00 b0 5a ed 00 00 00 00 00 00 00 00 ......Z......... backtrace: [] kmem_cache_alloc+0x190/0x208 [] __build_skb+0x30/0x98 [] __netdev_alloc_skb+0xb8/0x104 [] cpsw_rx_handler+0x68/0x1e4 [ti_cpsw] [] __cpdma_chan_free+0xa8/0xc4 [davinci_cpdma] [] __cpdma_chan_process+0x14c/0x16c [davinci_cpdma] [] cpdma_chan_process+0x44/0x5c [davinci_cpdma] [] cpsw_rx_poll+0x1c/0x9c [ti_cpsw] [] net_rx_action+0x1f0/0x2ec [] __do_softirq+0x134/0x258 [] do_softirq+0x68/0x70 [] __local_bh_enable_ip+0xd4/0xe8 [] _raw_spin_unlock_bh+0x30/0x34 [] igmp6_group_added+0x4c/0x1bc [] ipv6_dev_mc_inc+0x398/0x434 [] addrconf_dad_work+0x224/0x39c This happens because CPSW allocates SK buffers and then passes pointers on them in CPDMA where they stored in internal CPPI RAM (SRAM) which belongs to DEV MMIO space. Kmemleak does not scan IO memory and so reports memory leaks. Hence, mark allocated sk buffers as false positive explicitly. Cc: Catalin Marinas Signed-off-by: Grygorii Strashko Acked-by: Catalin Marinas Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c51f346..f85d605 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) netif_receive_skb(skb); ndev->stats.rx_bytes += len; ndev->stats.rx_packets++; + kmemleak_not_leak(new_skb); } else { ndev->stats.rx_dropped++; new_skb = skb; @@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) kfree_skb(skb); goto err_cleanup; } + kmemleak_not_leak(skb); } /* continue even if we didn't manage to submit all * receive descs -- cgit v0.10.2 From 0d039f337f45c48fb78b80cbf7b706b4de7f07ea Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 9 Aug 2016 21:36:04 +0800 Subject: bonding: fix the typo The message "803.ad" should be "802.3ad". Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1f276fa..217e8da 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0); MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " "0 for slow, 1 for fast"); module_param(ad_select, charp, 0); -MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " +MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " "0 for stable (default), 1 for bandwidth, " "2 for count"); module_param(min_links, int, 0); -- cgit v0.10.2 From b9a019899f61acca18df5fb5e38a8fcdfea86fcd Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 28 Jul 2016 19:38:07 +0100 Subject: ARM: 8590/1: sanity_check_meminfo(): avoid overflow on vmalloc_limit To limit the amount of mapped low memory, we determine a physical address boundary based on the start of the vmalloc area using __pa(). Strictly speaking, the vmalloc area location is arbitrary and does not necessarily corresponds to a valid physical address. For example, if PAGE_OFFSET = 0x80000000 PHYS_OFFSET = 0x90000000 vmalloc_min = 0xf0000000 then __pa(vmalloc_min) overflows and returns a wrapped 0 when phys_addr_t is a 32-bit type. Then the code that follows determines that the entire physical memory is above that boundary and no low memory gets mapped at all: |[...] |Machine model: Freescale i.MX51 NA04 Board |Ignoring RAM at 0x90000000-0xb0000000 (!CONFIG_HIGHMEM) |Consider using a HIGHMEM enabled kernel. To avoid this problem let's make vmalloc_limit a 64-bit value all the time and determine that boundary explicitly without using __pa(). Reported-by: Emil Renner Berthing Signed-off-by: Nicolas Pitre Tested-by: Emil Renner Berthing Signed-off-by: Russell King diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 62f4d01..12774c8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1155,10 +1155,19 @@ void __init sanity_check_meminfo(void) { phys_addr_t memblock_limit = 0; int highmem = 0; - phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; + u64 vmalloc_limit; struct memblock_region *reg; bool should_use_highmem = false; + /* + * Let's use our own (unoptimized) equivalent of __pa() that is + * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. + * The result is used as the upper bound on physical memory address + * and may itself be outside the valid range for which phys_addr_t + * and therefore __pa() is defined. 
+ */ + vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; + for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; @@ -1183,10 +1192,11 @@ void __init sanity_check_meminfo(void) if (reg->size > size_limit) { phys_addr_t overlap_size = reg->size - size_limit; - pr_notice("Truncating RAM at %pa-%pa to -%pa", - &block_start, &block_end, &vmalloc_limit); - memblock_remove(vmalloc_limit, overlap_size); + pr_notice("Truncating RAM at %pa-%pa", + &block_start, &block_end); block_end = vmalloc_limit; + pr_cont(" to -%pa", &block_end); + memblock_remove(vmalloc_limit, overlap_size); should_use_highmem = true; } } -- cgit v0.10.2 From 61444cde9170e256c238a02c9a4861930db04f5f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 28 Jul 2016 19:48:44 +0100 Subject: ARM: 8591/1: mm: use fully constructed struct pages for EFI pgd allocations The late_alloc() PTE allocation function used by create_mapping_late() does not call pgtable_page_ctor() on PTE pages it allocates, leaving the per-page spinlock uninitialized. Since generic page table manipulation code may assume that translation table pages that are not owned by init_mm are covered by fully constructed struct pages, the following crash may occur with the new UEFI memory attributes table code. efi: memattr: Processing EFI Memory Attributes table: efi: memattr: 0x0000ffa16000-0x0000ffa82fff [Runtime Code |RUN| | |XP| | | | | | | | ] Unable to handle kernel NULL pointer dereference at virtual address 00000010 pgd = c0204000 [00000010] *pgd=00000000 Internal error: Oops: 5 [#1] SMP ARM Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.7.0-rc4-00063-g3882aa7b340b #361 Hardware name: Generic DT based system task: ed858000 ti: ed842000 task.ti: ed842000 PC is at __lock_acquire+0xa0/0x19a8 ... [] (__lock_acquire) from [] (lock_acquire+0x6c/0x88) [] (lock_acquire) from [] (_raw_spin_lock+0x2c/0x3c) [] (_raw_spin_lock) from [] (apply_to_page_range+0xe8/0x238) [] (apply_to_page_range) from [] (efi_set_mapping_permissions+0x54/0x5c) [] (efi_set_mapping_permissions) from [] (efi_memattr_apply_permissions+0x2b8/0x378) [] (efi_memattr_apply_permissions) from [] (arm_enable_runtime_services+0x1f0/0x22c) [] (arm_enable_runtime_services) from [] (do_one_initcall+0x44/0x174) [] (do_one_initcall) from [] (kernel_init_freeable+0x90/0x1e8) [] (kernel_init_freeable) from [] (kernel_init+0x8/0x114) [] (kernel_init) from [] (ret_from_fork+0x14/0x24) The crash is due to the fact that the UEFI page tables are not owned by init_mm, but are not covered by fully constructed struct pages. Given that the UEFI subsystem is currently the only user of create_mapping_late(), add an unconditional call to pgtable_page_ctor() to late_alloc(). 
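A hedged sketch of the allocator shape being described (names and GFP flags are illustrative, not the exact mainline helper); the point is that pgtable_page_ctor() constructs the struct page, including the split page-table lock, before generic pagetable code touches the PTE page:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_pte_page_alloc(unsigned long sz)
{
	void *ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					     get_order(sz));

	if (!ptr)
		return NULL;
	if (!pgtable_page_ctor(virt_to_page(ptr))) {
		free_pages((unsigned long)ptr, get_order(sz));
		return NULL;	/* the helper in the patch BUG()s instead */
	}
	return ptr;
}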
Fixes: 9fc68b717c24 ("ARM/efi: Apply strict permissions for UEFI Runtime Services regions") Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 12774c8..6344913 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz) { void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz)); - BUG_ON(!ptr); + if (!ptr || !pgtable_page_ctor(virt_to_page(ptr))) + BUG(); return ptr; } -- cgit v0.10.2 From 87eed3c74d7c65556f744230a90bf9556dd29146 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 3 Aug 2016 10:33:35 +0100 Subject: ARM: fix address limit restoration for undefined instructions During boot, sometimes the kernel will test to see if an instruction causes an undefined instruction exception. Unfortunately, the exit path for these exceptions did not restore the address limit, which causes the rootfs mount code to fail. Fix the missing address limit restoration. Tested-by: Guenter Roeck Signed-off-by: Russell King diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index bc5f507..9f157e7 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -295,6 +295,7 @@ __und_svc_fault: bl __und_fault __und_svc_finish: + get_thread_info tsk ldr r5, [sp, #S_PSR] @ Get SVC cpsr svc_exit r5 @ return from exception UNWIND(.fnend ) -- cgit v0.10.2 From a96d3b7593a3eefab62dd930e5c99201c3678ee4 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Tue, 9 Aug 2016 18:00:08 +0200 Subject: dm9000: Fix irq trigger type setup on non-dt platforms Commit b5a099c67a1c36b "net: ethernet: davicom: fix devicetree irq resource" causes an interrupt storm after the ethernet interface is activated on S3C24XX platform (ARM non-dt), due to the interrupt trigger type not being set properly. It seems, after adding parsing of IRQ flags in commit 7085a7401ba54e92b "drivers: platform: parse IRQ flags from resources", there is no path for non-dt platforms where irq_set_type callback could be invoked when we don't pass the trigger type flags to the request_irq() call. In case of a board where the regression is seen the interrupt trigger type flags are passed through a platform device's resource and it is not currently handled properly without passing the irq trigger type flags to the request_irq() call. In case of OF an of_irq_get() call within platform_get_irq() function seems to be ensuring required irq_chip setup, but there is no equivalent code for non OF/ACPI platforms. This patch mostly restores irq trigger type setting code which has been removed in commit ("net: ethernet: davicom: fix devicetree irq resource"). Fixes: b5a099c67a1c36b913 ("net: ethernet: davicom: fix devicetree irq resource") Signed-off-by: Sylwester Nawrocki Acked-by: Robert Jarzmik Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16..f45385f 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1299,6 +1299,7 @@ static int dm9000_open(struct net_device *dev) { struct board_info *db = netdev_priv(dev); + unsigned int irq_flags = irq_get_trigger_type(dev->irq); if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); @@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) /* If there is no IRQ type specified, tell the user that this is a * problem */ - if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) + if (irq_flags == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); + irq_flags |= IRQF_SHARED; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ @@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) /* Initialize DM9000 board */ dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, - dev->name, dev)) + if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) return -EAGAIN; /* Now that we have an interrupt handler hooked up we can unmask * our interrupts -- cgit v0.10.2 From 836384d2501dee87b1c437f3e268871980c857bf Mon Sep 17 00:00:00 2001 From: Wenyou Yang Date: Fri, 5 Aug 2016 14:35:41 +0800 Subject: net: phy: micrel: Add specific suspend Disable all interrupts when suspend, they will be enabled when resume. Otherwise, the suspend/resume process will be blocked occasionally. Signed-off-by: Wenyou Yang Acked-by: Nicolas Ferre Signed-off-by: David S. Miller diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1882d98..053e879 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev, data[i] = kszphy_get_stat(phydev, i); } -static int kszphy_resume(struct phy_device *phydev) +static int kszphy_suspend(struct phy_device *phydev) { - int value; + /* Disable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_DISABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } - mutex_lock(&phydev->lock); + return genphy_suspend(phydev); +} - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); +static int kszphy_resume(struct phy_device *phydev) +{ + genphy_resume(phydev); - kszphy_config_intr(phydev); - mutex_unlock(&phydev->lock); + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } return 0; } @@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, + .suspend = kszphy_suspend, .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8061, -- cgit v0.10.2 From 51350ea0d7f355dfc03deb343a665802d3d5cbba Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 4 Aug 2016 21:36:05 +0300 Subject: mm, writeback: flush plugged IO in wakeup_flusher_threads() I've found funny live-lock between raid10 barriers during resync and memory controller hard limits. Inside mpage_readpages() task holds on to its plug bio which blocks the barrier in raid10. 
Its memory cgroup have no free memory thus the task goes into reclaimer but all reclaimable pages are dirty and cannot be written because raid10 is rebuilding and stuck on the barrier. Common flush of such IO in schedule() never happens, because the caller doesn't go to sleep. Lock is 'live' because changing memory limit or killing tasks which holds that stuck bio unblock whole progress. That was what happened in 3.18.x but I see no difference in upstream logic. Theoretically this might happen even without memory cgroup. Signed-off-by: Konstantin Khlebnikov Signed-off-by: Jens Axboe diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 4d09d44..05713a5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1949,6 +1949,12 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) { struct backing_dev_info *bdi; + /* + * If we are expecting writeback progress we must submit plugged IO. + */ + if (blk_needs_flush_plug(current)) + blk_schedule_flush_plug(current); + if (!nr_pages) nr_pages = get_nr_dirty_pages(); -- cgit v0.10.2 From c6d2ee09c2fffd3efdd31be2b2811d081a45bb99 Mon Sep 17 00:00:00 2001 From: Frederic Barrat Date: Mon, 8 Aug 2016 11:57:48 +0200 Subject: cxl: Set psl_fir_cntl to production environment value Switch the setting of psl_fir_cntl from debug to production environment recommended value. It mostly affects the PSL behavior when an error is raised in psl_fir1/2. Tested with cxlflash. Signed-off-by: Frederic Barrat Reviewed-by: Uma Krishnan Signed-off-by: Michael Ellerman diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 1d0347c3..6f0c4ac 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) { - u64 psl_dsnctl; + u64 psl_dsnctl, psl_fircntl; u64 chipid; u64 capp_unit_id; int rc; @@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_ cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); /* snoop write mask */ cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); - /* set fir_accum */ - cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL); + /* set fir_cntl to recommended value for production env */ + psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ + psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ + psl_fircntl |= 0x1ULL; /* ce_thresh */ + cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl); /* for debugging with trace arrays */ cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); -- cgit v0.10.2 From 7bb90c3715a496c650b2e879225030f9dd9cfafb Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Thu, 4 Aug 2016 11:11:19 +0900 Subject: bridge: Fix problems around fdb entries pointing to the bridge device Adding fdb entries pointing to the bridge device uses fdb_insert(), which lacks various checks and does not respect added_by_user flag. As a result, some inconsistent behavior can happen: * Adding temporary entries succeeds but results in permanent entries. * Same goes for "dynamic" and "use". * Changing mac address of the bridge device causes deletion of user-added entries. * Replacing existing entries looks successful from userspace but actually not, regardless of NLM_F_EXCL flag. Use the same logic as other entries and fix them. 
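The rule being enforced can be sketched as a predicate (a hypothetical helper, simplified from the patch): an entry whose destination is the bridge device itself has no source port, so only NUD_PERMANENT entries are accepted there, and NTF_USE requires a real port.

#include <linux/neighbour.h>
#include <linux/types.h>

/* Hypothetical predicate mirroring the check added to fdb_add_entry(). */
static bool bridge_self_entry_ok(bool has_source_port, u16 ndm_state)
{
	if (!has_source_port && !(ndm_state & NUD_PERMANENT))
		return false;
	return true;
}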
Fixes: 3741873b4f73 ("bridge: allow adding of fdb entries pointing to the bridge device") Signed-off-by: Toshiaki Makita Acked-by: Roopa Prabhu Signed-off-by: David S. Miller diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index c18080a..cd620fa 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) /* If old entry was unassociated with any port, then delete it. */ f = __br_fdb_get(br, br->dev->dev_addr, 0); - if (f && f->is_local && !f->dst) + if (f && f->is_local && !f->dst && !f->added_by_user) fdb_delete_local(br, NULL, f); fdb_insert(br, NULL, newaddr, 0); @@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) if (!br_vlan_should_use(v)) continue; f = __br_fdb_get(br, br->dev->dev_addr, v->vid); - if (f && f->is_local && !f->dst) + if (f && f->is_local && !f->dst && !f->added_by_user) fdb_delete_local(br, NULL, f); fdb_insert(br, NULL, newaddr, v->vid); } @@ -764,20 +764,25 @@ out: } /* Update (create or replace) forwarding database entry */ -static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, - __u16 state, __u16 flags, __u16 vid) +static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, + const __u8 *addr, __u16 state, __u16 flags, __u16 vid) { - struct net_bridge *br = source->br; struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; bool modified = false; /* If the port cannot learn allow only local and static entries */ - if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) && + if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && !(source->state == BR_STATE_LEARNING || source->state == BR_STATE_FORWARDING)) return -EPERM; + if (!source && !(state & NUD_PERMANENT)) { + pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n", + br->dev->name); + return -EINVAL; + } + fdb = fdb_find(head, addr, vid); if (fdb == NULL) { if (!(flags & NLM_F_CREATE)) @@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, return 0; } -static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, - const unsigned char *addr, u16 nlh_flags, u16 vid) +static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, + struct net_bridge_port *p, const unsigned char *addr, + u16 nlh_flags, u16 vid) { int err = 0; if (ndm->ndm_flags & NTF_USE) { + if (!p) { + pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n", + br->dev->name); + return -EINVAL; + } local_bh_disable(); rcu_read_lock(); - br_fdb_update(p->br, p, addr, vid, true); + br_fdb_update(br, p, addr, vid, true); rcu_read_unlock(); local_bh_enable(); } else { - spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, + spin_lock_bh(&br->hash_lock); + err = fdb_add_entry(br, p, addr, ndm->ndm_state, nlh_flags, vid); - spin_unlock_bh(&p->br->hash_lock); + spin_unlock_bh(&br->hash_lock); } return err; @@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], dev->name); return -EINVAL; } + br = p->br; vg = nbp_vlan_group(p); } @@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* VID was specified, so use it. 
*/ - if (dev->priv_flags & IFF_EBRIDGE) - err = br_fdb_insert(br, NULL, addr, vid); - else - err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid); } else { - if (dev->priv_flags & IFF_EBRIDGE) - err = br_fdb_insert(br, NULL, addr, 0); - else - err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0); if (err || !vg || !vg->num_vlans) goto out; @@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - if (dev->priv_flags & IFF_EBRIDGE) - err = br_fdb_insert(br, NULL, addr, v->vid); - else - err = __br_fdb_add(ndm, p, addr, nlh_flags, - v->vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid); if (err) goto out; } -- cgit v0.10.2 From 1bc8b816cb8058c31f61fe78442f10a43209e582 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 2 Aug 2016 10:07:05 +0200 Subject: powerpc/32: Fix csum_partial_copy_generic() Commit 7aef4136566b0 ("powerpc32: rewrite csum_partial_copy_generic() based on copy_tofrom_user()") introduced a bug when destination address is odd and initial csum is not null In that (rare) case the initial csum value has to be rotated one byte as well as the resulting value is This patch also fixes related comments Fixes: 7aef4136566b0 ("powerpc32: rewrite csum_partial_copy_generic() based on copy_tofrom_user()") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index d90870a..0a57fe6 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S @@ -127,8 +127,9 @@ _GLOBAL(csum_partial_copy_generic) stw r7,12(r1) stw r8,8(r1) - andi. r0,r4,1 /* is destination address even ? */ - cmplwi cr7,r0,0 + rlwinm r0,r4,3,0x8 + rlwnm r6,r6,r0,0,31 /* odd destination address: rotate one byte */ + cmplwi cr7,r0,0 /* is destination address even ? */ addic r12,r6,0 addi r6,r4,-4 neg r0,r4 @@ -237,7 +238,7 @@ _GLOBAL(csum_partial_copy_generic) 66: addze r3,r12 addi r1,r1,16 beqlr+ cr7 - rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */ + rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */ blr /* read fault */ -- cgit v0.10.2 From c7a318ba868c61fc9be710a4970172d8c2eeb8b9 Mon Sep 17 00:00:00 2001 From: Cyril Bur Date: Wed, 10 Aug 2016 15:44:46 +1000 Subject: powerpc/ptrace: Fix coredump since ptrace TM changes Commit 8d460f6156cd ("powerpc/process: Add the function flush_tmregs_to_thread") added flush_tmregs_to_thread() and included the assumption that it would only be called for a task which is not current. Although this is correct for ptrace, when generating a core dump, some of the routines which call flush_tmregs_to_thread() are called. This leads to a WARNing such as: Not expecting ptrace on self: TM regs may be incorrect ------------[ cut here ]------------ WARNING: CPU: 123 PID: 7727 at arch/powerpc/kernel/process.c:1088 flush_tmregs_to_thread+0x78/0x80 CPU: 123 PID: 7727 Comm: libvirtd Not tainted 4.8.0-rc1-gcc6x-g61e8a0d #1 task: c000000fe631b600 task.stack: c000000fe63b0000 NIP: c00000000001a1a8 LR: c00000000001a1a4 CTR: c000000000717780 REGS: c000000fe63b3420 TRAP: 0700 Not tainted (4.8.0-rc1-gcc6x-g61e8a0d) MSR: 900000010282b033 CR: 28004222 XER: 20000000 ... 
NIP [c00000000001a1a8] flush_tmregs_to_thread+0x78/0x80 LR [c00000000001a1a4] flush_tmregs_to_thread+0x74/0x80 Call Trace: flush_tmregs_to_thread+0x74/0x80 (unreliable) vsr_get+0x64/0x1a0 elf_core_dump+0x604/0x1430 do_coredump+0x5fc/0x1200 get_signal+0x398/0x740 do_signal+0x54/0x2b0 do_notify_resume+0x98/0xb0 ret_from_except_lite+0x70/0x74 So fix flush_tmregs_to_thread() to detect the case where it is called on current, and a transaction is active, and in that case flush the TM regs to the thread_struct. This patch also moves flush_tmregs_to_thread() into ptrace.c as it is only called from that file. Fixes: 8d460f6156cd ("powerpc/process: Add the function flush_tmregs_to_thread") Signed-off-by: Cyril Bur [mpe: Flesh out change log] Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 0a74ebe..17c8380 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void) static inline void __giveup_spe(struct task_struct *t) { } #endif -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -extern void flush_tmregs_to_thread(struct task_struct *); -#else -static inline void flush_tmregs_to_thread(struct task_struct *t) -{ -} -#endif - static inline void clear_task_ebb(struct task_struct *t) { #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 58ccf86..9ee2623 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread, #endif } -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -void flush_tmregs_to_thread(struct task_struct *tsk) -{ - /* - * Process self tracing is not yet supported through - * ptrace interface. Ptrace generic code should have - * prevented this from happening in the first place. - * Warn once here with the message, if some how it - * is attempted. - */ - WARN_ONCE(tsk == current, - "Not expecting ptrace on self: TM regs may be incorrect\n"); - - /* - * If task is not current, it should have been flushed - * already to it's thread_struct during __switch_to(). - */ -} -#endif - struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *new) { diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 4f3c575..bf91658 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -38,6 +38,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_END, }; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static void flush_tmregs_to_thread(struct task_struct *tsk) +{ + /* + * If task is not current, it will have been flushed already to + * it's thread_struct during __switch_to(). + * + * A reclaim flushes ALL the state. 
+ */ + + if (tsk == current && MSR_TM_SUSPENDED(mfmsr())) + tm_reclaim_current(TM_CAUSE_SIGNAL); + +} +#else +static inline void flush_tmregs_to_thread(struct task_struct *tsk) { } +#endif + /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register -- cgit v0.10.2 From 9ba333dc55cbb9523553df973adb3024d223e905 Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Mon, 8 Aug 2016 14:08:17 +0200 Subject: s390/dasd: fix hanging device after clear subchannel When a device is in a status where CIO has killed all I/O by itself the interrupt for a clear request may not contain an irb to determine the clear function. Instead it contains an error pointer -EIO. This was ignored by the DASD int_handler leading to a hanging device waiting for a clear interrupt. Handle -EIO error pointer correctly for requests that are clear pending and treat the clear as successful. Signed-off-by: Stefan Haberland Reviewed-by: Sebastian Ott Cc: stable@vger.kernel.org Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 8973d34..fb1b56a 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, u8 *sense = NULL; int expires; + cqr = (struct dasd_ccw_req *) intparm; if (IS_ERR(irb)) { switch (PTR_ERR(irb)) { case -EIO: + if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { + device = (struct dasd_device *) cqr->startdev; + cqr->status = DASD_CQR_CLEARED; + dasd_device_clear_timer(device); + wake_up(&dasd_flush_wq); + dasd_schedule_device_bh(device); + return; + } break; case -ETIMEDOUT: DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " @@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, } now = get_tod_clock(); - cqr = (struct dasd_ccw_req *) intparm; /* check for conditions that should be handled immediately */ if (!cqr || !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && -- cgit v0.10.2 From 4d81aaa53c2dea220ddf88e19c33033d6cf4f8cb Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 9 Aug 2016 12:26:28 +0200 Subject: s390/pageattr: handle numpages parameter correctly Both set_memory_ro() and set_memory_rw() will modify the page attributes of at least one page, even if the numpages parameter is zero. The author expected that calling these functions with numpages == zero would never happen. However with the new 444d13ff10fb ("modules: add ro_after_init support") feature this happens frequently. Therefore do the right thing and make these two functions return gracefully if nothing should be done. Fixes crashes on module load like this one: Unable to handle kernel pointer dereference in virtual kernel address space Failing address: 000003ff80008000 TEID: 000003ff80008407 Fault in home space mode while using kernel ASCE. 
AS:0000000000d18007 R3:00000001e6aa4007 S:00000001e6a10800 P:00000001e34ee21d Oops: 0004 ilc:3 [#1] SMP Modules linked in: x_tables CPU: 10 PID: 1 Comm: systemd Not tainted 4.7.0-11895-g3fa9045 #4 Hardware name: IBM 2964 N96 703 (LPAR) task: 00000001e9118000 task.stack: 00000001e9120000 Krnl PSW : 0704e00180000000 00000000005677f8 (rb_erase+0xf0/0x4d0) R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:2 PM:0 RI:0 EA:3 Krnl GPRS: 000003ff80008b20 000003ff80008b20 000003ff80008b70 0000000000b9d608 000003ff80008b20 0000000000000000 00000001e9123e88 000003ff80008950 00000001e485ab40 000003ff00000000 000003ff80008b00 00000001e4858480 0000000100000000 000003ff80008b68 00000000001d5998 00000001e9123c28 Krnl Code: 00000000005677e8: ec1801c3007c cgij %r1,0,8,567b6e 00000000005677ee: e32010100020 cg %r2,16(%r1) #00000000005677f4: a78401c2 brc 8,567b78 >00000000005677f8: e35010080024 stg %r5,8(%r1) 00000000005677fe: ec5801af007c cgij %r5,0,8,567b5c 0000000000567804: e30050000024 stg %r0,0(%r5) 000000000056780a: ebacf0680004 lmg %r10,%r12,104(%r15) 0000000000567810: 07fe bcr 15,%r14 Call Trace: ([<000003ff80008900>] __this_module+0x0/0xffffffffffffd700 [x_tables]) ([<0000000000264fd4>] do_init_module+0x12c/0x220) ([<00000000001da14a>] load_module+0x24e2/0x2b10) ([<00000000001da976>] SyS_finit_module+0xbe/0xd8) ([<0000000000803b26>] system_call+0xd6/0x264) Last Breaking-Event-Address: [<000000000056771a>] rb_erase+0x12/0x4d0 Kernel panic - not syncing: Fatal exception: panic_on_oops Reported-by: Christian Borntraeger Reported-and-tested-by: Sebastian Ott Fixes: e8a97e42dc98 ("s390/pageattr: allow kernel page table splitting") Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 7104ffb..af7cf28c 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end, int rc = -EINVAL; pgd_t *pgdp; + if (addr == end) + return 0; if (end >= MODULES_END) return -EINVAL; mutex_lock(&cpa_mutex); -- cgit v0.10.2 From 7d70c63c7132eb95e428e945242360233ab23d82 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 10 Aug 2016 17:29:29 +1000 Subject: powerpc: Print the kernel load address at the end of prom_init() This makes it easier to debug crashes that happen very early before the kernel takes over Open Firmware by allowing us to relate the OF reported crashing addresses to offsets within the kernel. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 6ee4b72..4e74fc5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2940,7 +2940,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* Don't print anything after quiesce under OPAL, it crashes OFW */ if (of_platform != PLATFORM_OPAL) { - prom_printf("Booting Linux via __start() ...\n"); + prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); prom_debug("->dt_header_start=0x%x\n", hdr); } -- cgit v0.10.2 From f9cc1d1f808dbdfd56978259d262b879504efaed Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 10 Aug 2016 17:32:38 +1000 Subject: powerpc: Update obsolete comment in setup_32.c about early_init() We don't identify the machine type anymore... 
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index c3e861d..f10a975 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -93,10 +93,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) * and we are running with enough of the MMU enabled to have our * proper kernel virtual addresses * - * Find out what kind of machine we're on and save any data we need - * from the early boot process (devtree is copied on pmac by prom_init()). - * This is called very early on the boot process, after a minimal - * MMU environment has been set up but before MMU_init is called. + * We do the initial parsing of the flat device-tree and prepares + * for the MMU to be fully initialized. */ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ -- cgit v0.10.2 From 97f6e0cc35026a2a09147a6da636d901525e1969 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 10 Aug 2016 17:27:34 +1000 Subject: powerpc/32: Fix crash during static key init We cannot do those initializations from apply_feature_fixups() as this function runs in a very restricted environment on 32-bit where the kernel isn't running at its linked address and the PTRRELOC() macro must be used for any global access. Instead, split them into a separate setup_feature_keys() function which is called in a more suitable spot on ppc32. Fixes: 309b315b6ec6 ("powerpc: Call jump_label_init() in apply_feature_fixups()") Reported-and-tested-by: Christian Kujau Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 57fec8a..ddf54f5 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -186,6 +186,7 @@ label##3: \ #ifndef __ASSEMBLY__ void apply_feature_fixups(void); +void setup_feature_keys(void); #endif #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index f10a975..24ec3ea 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -100,6 +100,9 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ notrace void __init machine_init(u64 dt_ptr) { + /* Configure static keys first, now that we're relocated. */ + setup_feature_keys(); + /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index eafb9a7..7ac8e6e 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr) /* Apply all the dynamic patching */ apply_feature_fixups(); + setup_feature_keys(); /* Initialize the hash table or TLB handling */ early_init_mmu(); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 74145f0..043415f 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -188,7 +188,10 @@ void __init apply_feature_fixups(void) &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); #endif do_final_fixups(); +} +void __init setup_feature_keys(void) +{ /* * Initialise jump label.
This causes all the cpu/mmu_has_feature() * checks to take on their correct polarity based on the current set of -- cgit v0.10.2 From c987ff0d3cb37d7fe1ddaa370811dfd9f73643fa Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 9 Aug 2016 17:31:35 +0100 Subject: iommu/dma: Respect IOMMU aperture when allocating Where a device driver has set a 64-bit DMA mask to indicate the absence of addressing limitations, we still need to ensure that we don't allocate IOVAs beyond the actual input size of the IOMMU. The reported aperture is the most reliable way we have of inferring that input address size, so use that to enforce a hard upper limit where available. Fixes: 0db2e5d18f76 ("iommu: Implement common IOMMU ops for DMA mapping") Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 7d991c8..00c8a08 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -152,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) } } -static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, +static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, dma_addr_t dma_limit) { + struct iova_domain *iovad = domain->iova_cookie; unsigned long shift = iova_shift(iovad); unsigned long length = iova_align(iovad, size) >> shift; + if (domain->geometry.force_aperture) + dma_limit = min(dma_limit, domain->geometry.aperture_end); /* * Enforce size-alignment to be safe - there could perhaps be an * attribute to control this per-device, or at least per-domain... @@ -315,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, if (!pages) return NULL; - iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); + iova = __alloc_iova(domain, size, dev->coherent_dma_mask); if (!iova) goto out_free_pages; @@ -387,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, phys_addr_t phys = page_to_phys(page) + offset; size_t iova_off = iova_offset(iovad, phys); size_t len = iova_align(iovad, size + iova_off); - struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev)); + struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); if (!iova) return DMA_ERROR_CODE; @@ -539,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } - iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev)); + iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); if (!iova) goto out_restore_sg; -- cgit v0.10.2 From 1a9e4c564ab174e53ed86def922804a5ddc63e7d Mon Sep 17 00:00:00 2001 From: Nicolai Stange Date: Thu, 14 Jul 2016 17:22:54 +0200 Subject: x86/timers/apic: Fix imprecise timer interrupts by eliminating TSC clockevents frequency roundoff error I noticed the following bug/misbehavior on certain Intel systems: with a single task running on a NOHZ CPU on an Intel Haswell, I recognized that I did not only get the one expected local_timer APIC interrupt, but two per second at minimum. (!) Further tracing showed that the first one precedes the programmed deadline by up to ~50us and hence, it did nothing except for reprogramming the TSC deadline clockevent device to trigger shortly thereafter again. The reason for this is imprecise calibration, the timeout we program into the APIC results in 'too short' timer interrupts. The core (hr)timer code notices this (because it has a precise ktime source and sees the short interrupt) and fixes it up by programming an additional very short interrupt period. 
This is obviously suboptimal. The reason for the imprecise calibration is twofold, and this patch fixes the first reason: In setup_APIC_timer(), the registered clockevent device's frequency is calculated by first dividing tsc_khz by TSC_DIVISOR and multiplying it with 1000 afterwards: (tsc_khz / TSC_DIVISOR) * 1000 The multiplication with 1000 is done for converting from kHz to Hz and the division by TSC_DIVISOR is carried out in order to make sure that the final result fits into an u32. However, with the order given in this calculation, the roundoff error introduced by the division gets magnified by a factor of 1000 by the following multiplication. To fix it, reversing the order of the division and the multiplication a la: (tsc_khz * 1000) / TSC_DIVISOR ... reduces the roundoff error already. Furthermore, if TSC_DIVISOR divides 1000, associativity holds: (tsc_khz * 1000) / TSC_DIVISOR = tsc_khz * (1000 / TSC_DIVISOR) and thus, the roundoff error even vanishes and the whole operation can be carried out within 32 bits. The powers of two that divide 1000 are 2, 4 and 8. A value of 8 for TSC_DIVISOR still allows for TSC frequencies up to 2^32 / 10^9ns * 8 = 34.4GHz which is way larger than anything to expect in the next years. Thus we also replace the current TSC_DIVISOR value of 32 by 8. Reverse the order of the divison and the multiplication in the calculation of the registered clockevent device's frequency. Signed-off-by: Nicolai Stange Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Paolo Bonzini Acked-by: Thomas Gleixner Cc: Adrian Hunter Cc: Borislav Petkov Cc: Christopher S. Hall Cc: H. Peter Anvin Cc: Hidehiro Kawai Cc: Len Brown Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Viresh Kumar Link: http://lkml.kernel.org/r/20160714152255.18295-2-nicstange@gmail.com [ Improved changelog. ] Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ac8d8ad..a315dc4 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -313,7 +313,7 @@ int lapic_get_maxlvt(void) /* Clock divisor */ #define APIC_DIVISOR 16 -#define TSC_DIVISOR 32 +#define TSC_DIVISOR 8 /* * This function sets up the local APIC timer, with a timeout of @@ -565,7 +565,7 @@ static void setup_APIC_timer(void) CLOCK_EVT_FEAT_DUMMY); levt->set_next_event = lapic_next_deadline; clockevents_config_and_register(levt, - (tsc_khz / TSC_DIVISOR) * 1000, + tsc_khz * (1000 / TSC_DIVISOR), 0xF, ~0UL); } else clockevents_register_device(levt); -- cgit v0.10.2 From 6731b0d611a1274f9e785fa0189ac2aeeabd0591 Mon Sep 17 00:00:00 2001 From: Nicolai Stange Date: Thu, 14 Jul 2016 17:22:55 +0200 Subject: x86/timers/apic: Inform TSC deadline clockevent device about recalibration This patch eliminates a source of imprecise APIC timer interrupts, which imprecision may result in double interrupts or even late interrupts. The TSC deadline clockevent devices' configuration and registration happens before the TSC frequency calibration is refined in tsc_refine_calibration_work(). This results in the TSC clocksource and the TSC deadline clockevent devices being configured with slightly different frequencies: the former gets the refined one and the latter are configured with the inaccurate frequency detected earlier by means of the "Fast TSC calibration using PIT". Within the APIC code, introduce the notifier function lapic_update_tsc_freq() which reconfigures all per-CPU TSC deadline clockevent devices with the current tsc_khz. 
Call it from the TSC code after TSC calibration refinement has happened. Signed-off-by: Nicolai Stange Signed-off-by: Peter Zijlstra (Intel) Acked-by: Thomas Gleixner Cc: Adrian Hunter Cc: Borislav Petkov Cc: Christopher S. Hall Cc: H. Peter Anvin Cc: Hidehiro Kawai Cc: Len Brown Cc: Linus Torvalds Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Viresh Kumar Link: http://lkml.kernel.org/r/20160714152255.18295-3-nicstange@gmail.com [ Pushed #ifdef CONFIG_X86_LOCAL_APIC into header, improved changelog. ] Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index f5befd4..1243577 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -135,6 +135,7 @@ extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); extern void setup_secondary_APIC_clock(void); +extern void lapic_update_tsc_freq(void); extern int APIC_init_uniprocessor(void); #ifdef CONFIG_X86_64 @@ -170,6 +171,7 @@ static inline void init_apic_mappings(void) { } static inline void disable_local_APIC(void) { } # define setup_boot_APIC_clock x86_init_noop # define setup_secondary_APIC_clock x86_init_noop +static inline void lapic_update_tsc_freq(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_X2APIC diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index a315dc4..0fd3d65 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -572,6 +572,30 @@ static void setup_APIC_timer(void) } /* + * Install the updated TSC frequency from recalibration at the TSC + * deadline clockevent devices. + */ +static void __lapic_update_tsc_freq(void *info) +{ + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); + + if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) + return; + + clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR)); +} + +void lapic_update_tsc_freq(void) +{ + /* + * The clockevent device's ->mult and ->shift can both be + * changed. In order to avoid races, schedule the frequency + * update code on each CPU. + */ + on_each_cpu(__lapic_update_tsc_freq, NULL, 0); +} + +/* * In this functions we calibrate APIC bus clocks to the external timer. * * We want to do the calibration only once since we want to have local timer diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index a804b5a..8fb4b6a 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -22,6 +22,7 @@ #include #include #include +#include unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); @@ -1249,6 +1250,9 @@ static void tsc_refine_calibration_work(struct work_struct *work) (unsigned long)tsc_khz / 1000, (unsigned long)tsc_khz % 1000); + /* Inform the TSC deadline clockevent devices about the recalibration */ + lapic_update_tsc_freq(); + out: if (boot_cpu_has(X86_FEATURE_ART)) art_related_clocksource = &clocksource_tsc; -- cgit v0.10.2 From 0b8f1e2e26bfc6b9abe3f0f3faba2cb0eecb9fb9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Aug 2016 14:37:24 +0200 Subject: perf/core: Fix sideband list-iteration vs. 
event ordering NULL pointer deference crash Vegard Nossum reported that perf fuzzing generates a NULL pointer dereference crash: > Digging a bit deeper into this, it seems the event itself is getting > created by perf_event_open() and it gets added to the pmu_event_list > through: > > perf_event_open() > - perf_event_alloc() > - account_event() > - account_pmu_sb_event() > - attach_sb_event() > > so at this point the event is being attached but its ->ctx is still > NULL. It seems like ->ctx is set just a bit later in > perf_event_open(), though. > > But before that, __schedule() comes along and creates a stack trace > similar to the one above: > > __schedule() > - __perf_event_task_sched_out() > - perf_iterate_sb() > - perf_iterate_sb_cpu() > - event_filter_match() > - perf_cgroup_match() > - __get_cpu_context() > - (dereference ctx which is NULL) > > So I guess the question is... should the event be attached (= put on > the list) before ->ctx gets set? Or should the cgroup code check for a > NULL ->ctx? The latter seems like the simplest solution. Moving the list-add later creates a bit of a mess. Reported-by: Vegard Nossum Tested-by: Vegard Nossum Tested-by: Vince Weaver Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: David Carrillo-Cisneros Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Fixes: f2fb6bef9251 ("perf/core: Optimize side-band event delivery") Link: http://lkml.kernel.org/r/20160804123724.GN6862@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/kernel/events/core.c b/kernel/events/core.c index a19550d..87d02b8 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1716,8 +1716,8 @@ static inline int pmu_filter_match(struct perf_event *event) static inline int event_filter_match(struct perf_event *event) { - return (event->cpu == -1 || event->cpu == smp_processor_id()) - && perf_cgroup_match(event) && pmu_filter_match(event); + return (event->cpu == -1 || event->cpu == smp_processor_id()) && + perf_cgroup_match(event) && pmu_filter_match(event); } static void @@ -1737,8 +1737,8 @@ event_sched_out(struct perf_event *event, * maintained, otherwise bogus information is return * via read() for time_enabled, time_running: */ - if (event->state == PERF_EVENT_STATE_INACTIVE - && !event_filter_match(event)) { + if (event->state == PERF_EVENT_STATE_INACTIVE && + !event_filter_match(event)) { delta = tstamp - event->tstamp_stopped; event->tstamp_running += delta; event->tstamp_stopped = tstamp; @@ -2236,10 +2236,15 @@ perf_install_in_context(struct perf_event_context *ctx, lockdep_assert_held(&ctx->mutex); - event->ctx = ctx; if (event->cpu != -1) event->cpu = cpu; + /* + * Ensures that if we can observe event->ctx, both the event and ctx + * will be 'complete'. See perf_iterate_sb_cpu(). + */ + smp_store_release(&event->ctx, ctx); + if (!task) { cpu_function_call(cpu, __perf_install_in_context, event); return; @@ -5969,6 +5974,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data) struct perf_event *event; list_for_each_entry_rcu(event, &pel->list, sb_list) { + /* + * Skip events that are not fully formed yet; ensure that + * if we observe event->ctx, both event and ctx will be + * complete enough. See perf_install_in_context(). 
+ */ + if (!smp_load_acquire(&event->ctx)) + continue; + if (event->state < PERF_EVENT_STATE_INACTIVE) continue; if (!event_filter_match(event)) -- cgit v0.10.2 From db4a835601b73cf8d6cd8986381d966b8e13d2d9 Mon Sep 17 00:00:00 2001 From: David Carrillo-Cisneros Date: Tue, 2 Aug 2016 00:48:12 -0700 Subject: perf/core: Set cgroup in CPU contexts for new cgroup events There's a perf stat bug easy to observer on a machine with only one cgroup: $ perf stat -e cycles -I 1000 -C 0 -G / # time counts unit events 1.000161699 cycles / 2.000355591 cycles / 3.000565154 cycles / 4.000951350 cycles / We'd expect some output there. The underlying problem is that there is an optimization in perf_cgroup_sched_{in,out}() that skips the switch of cgroup events if the old and new cgroups in a task switch are the same. This optimization interacts with the current code in two ways that cause a CPU context's cgroup (cpuctx->cgrp) to be NULL even if a cgroup event matches the current task. These are: 1. On creation of the first cgroup event in a CPU: In current code, cpuctx->cpu is only set in perf_cgroup_sched_in, but due to the aforesaid optimization, perf_cgroup_sched_in will run until the next cgroup switches in that CPU. This may happen late or never happen, depending on system's number of cgroups, CPU load, etc. 2. On deletion of the last cgroup event in a cpuctx: In list_del_event, cpuctx->cgrp is set NULL. Any new cgroup event will not be sched in because cpuctx->cgrp == NULL until a cgroup switch occurs and perf_cgroup_sched_in is executed (updating cpuctx->cgrp). This patch fixes both problems by setting cpuctx->cgrp in list_add_event, mirroring what list_del_event does when removing a cgroup event from CPU context, as introduced in: commit 68cacd29167b ("perf_events: Fix stale ->cgrp pointer in update_cgrp_time_from_cpuctx()") With this patch, cpuctx->cgrp is always set/clear when installing/removing the first/last cgroup event in/from the CPU context. With cpuctx->cgrp correctly set, event_filter_match works as intended when events are sched in/out. 
After the fix, the output is as expected: $ perf stat -e cycles -I 1000 -a -G / # time counts unit events 1.004699159 627342882 cycles / 2.007397156 615272690 cycles / 3.010019057 616726074 cycles / Signed-off-by: David Carrillo-Cisneros Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Paul Turner Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vegard Nossum Cc: Vince Weaver Link: http://lkml.kernel.org/r/1470124092-113192-1-git-send-email-davidcc@google.com Signed-off-by: Ingo Molnar diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8ed43261..2b6b43c 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -743,7 +743,9 @@ struct perf_event_context { u64 parent_gen; u64 generation; int pin_count; +#ifdef CONFIG_CGROUP_PERF int nr_cgroups; /* cgroup evts */ +#endif void *task_ctx_data; /* pmu specific data */ struct rcu_head rcu_head; }; @@ -769,7 +771,9 @@ struct perf_cpu_context { unsigned int hrtimer_active; struct pmu *unique_pmu; +#ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; +#endif }; struct perf_output_handle { diff --git a/kernel/events/core.c b/kernel/events/core.c index 87d02b8..1903b8f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event, } } } + +/* + * Update cpuctx->cgrp so that it is set when first cgroup event is added and + * cleared when last cgroup event is removed. + */ +static inline void +list_update_cgroup_event(struct perf_event *event, + struct perf_event_context *ctx, bool add) +{ + struct perf_cpu_context *cpuctx; + + if (!is_cgroup_event(event)) + return; + + if (add && ctx->nr_cgroups++) + return; + else if (!add && --ctx->nr_cgroups) + return; + /* + * Because cgroup events are always per-cpu events, + * this will always be called from the right CPU. + */ + cpuctx = __get_cpu_context(ctx); + cpuctx->cgrp = add ? event->cgrp : NULL; +} + #else /* !CONFIG_CGROUP_PERF */ static inline bool @@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event, struct perf_event_context *ctx) { } + +static inline void +list_update_cgroup_event(struct perf_event *event, + struct perf_event_context *ctx, bool add) +{ +} + #endif /* @@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) static void list_add_event(struct perf_event *event, struct perf_event_context *ctx) { + lockdep_assert_held(&ctx->lock); WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); @@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) list_add_tail(&event->group_entry, list); } - if (is_cgroup_event(event)) - ctx->nr_cgroups++; + list_update_cgroup_event(event, ctx, true); list_add_rcu(&event->event_entry, &ctx->event_list); ctx->nr_events++; @@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event) static void list_del_event(struct perf_event *event, struct perf_event_context *ctx) { - struct perf_cpu_context *cpuctx; - WARN_ON_ONCE(event->ctx != ctx); lockdep_assert_held(&ctx->lock); @@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) event->attach_state &= ~PERF_ATTACH_CONTEXT; - if (is_cgroup_event(event)) { - ctx->nr_cgroups--; - /* - * Because cgroup events are always per-cpu events, this will - * always be called from the right CPU. 
- */ - cpuctx = __get_cpu_context(ctx); - /* - * If there are no more cgroup events then clear cgrp to avoid - * stale pointer in update_cgrp_time_from_cpuctx(). - */ - if (!ctx->nr_cgroups) - cpuctx->cgrp = NULL; - } + list_update_cgroup_event(event, ctx, false); ctx->nr_events--; if (event->attr.inherit_stat) -- cgit v0.10.2 From 4da449ae1df9cfeb167e78f250b250eff64bc65e Mon Sep 17 00:00:00 2001 From: Laura Garcia Liebana Date: Tue, 9 Aug 2016 20:46:16 +0200 Subject: netfilter: nft_exthdr: Add size check on u8 nft_exthdr attributes Fix the direct assignment of offset and length attributes included in nft_exthdr structure from u32 data to u8. Signed-off-by: Laura Garcia Liebana Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index ba7aed1..82c264e 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); + u32 offset, len; if (tb[NFTA_EXTHDR_DREG] == NULL || tb[NFTA_EXTHDR_TYPE] == NULL || @@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, tb[NFTA_EXTHDR_LEN] == NULL) return -EINVAL; + offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET])); + len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN])); + + if (offset > U8_MAX || len > U8_MAX) + return -ERANGE; + priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); - priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET])); - priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN])); + priv->offset = offset; + priv->len = len; priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); return nft_validate_register_store(ctx, priv->dreg, NULL, -- cgit v0.10.2 From c7de573471832dff7d31f0c13b0f143d6f017799 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Fri, 29 Jul 2016 03:17:58 +0300 Subject: usb: dwc3: gadget: increment request->actual once When using SG lists, we would end up setting request->actual to: num_mapped_sgs * (request->length - count) Let's fix that up by incrementing request->actual only once. Cc: Reported-by: Brian E Rogers Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8f8c215..863c306 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2013,14 +2013,6 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, s_pkt = 1; } - /* - * We assume here we will always receive the entire data block - * which we should receive. Meaning, if we program RX to - * receive 4K but we receive only 2K, we assume that's all we - * should receive and we simply bounce the request back to the - * gadget driver for further processing. - */ - req->request.actual += req->request.length - count; if (s_pkt) return 1; if ((event->status & DEPEVT_STATUS_LST) && @@ -2040,6 +2032,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_trb *trb; unsigned int slot; unsigned int i; + int count = 0; int ret; do { @@ -2054,6 +2047,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, slot++; slot %= DWC3_TRB_NUM; trb = &dep->trb_pool[slot]; + count += trb->size & DWC3_TRB_SIZE_MASK; + ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status); @@ -2061,6 +2056,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, break; } while (++i < req->request.num_mapped_sgs); + /* + * We assume here we will always receive the entire data block + * which we should receive. 
Meaning, if we program RX to + * receive 4K but we receive only 2K, we assume that's all we + * should receive and we simply bounce the request back to the + * gadget driver for further processing. + */ + req->request.actual += req->request.length - count; dwc3_gadget_giveback(dep, req, status); if (ret) -- cgit v0.10.2 From e5b36ae2f851024d43c76e51f395d32ce8d769ce Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 10 Aug 2016 11:13:26 +0300 Subject: usb: dwc3: gadget: fix for short pkts during chained xfers DWC3 has one interesting peculiarity with chained transfers. If we setup N chained transfers and we get a short packet before processing all N TRBs, DWC3 will (conditionally) issue a XferComplete or XferInProgress event and retire all TRBs from the one which got a short packet to the last without clearing their HWO bits. This means SW must clear HWO bit manually, which this patch is doing. Cc: Cc: Brian E Rogers Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 863c306..241f5c7 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req, struct dwc3_trb *trb, - const struct dwc3_event_depevt *event, int status) + const struct dwc3_event_depevt *event, int status, + int chain) { unsigned int count; unsigned int s_pkt = 0; @@ -1964,6 +1965,19 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, dep->queued_requests--; trace_dwc3_complete_trb(dep, trb); + /* + * If we're in the middle of series of chained TRBs and we + * receive a short transfer along the way, DWC3 will skip + * through all TRBs including the last TRB in the chain (the + * where CHN bit is zero. DWC3 will also avoid clearing HWO + * bit and SW has to do it manually. + * + * We're going to do that here to avoid problems of HW trying + * to use bogus TRBs for transfers. + */ + if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) /* * We continue despite the error. There is not much we @@ -1975,6 +1989,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, */ dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", dep->name, trb); + count = trb->size & DWC3_TRB_SIZE_MASK; if (dep->direction) { @@ -2036,10 +2051,13 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, int ret; do { + int chain; + req = next_request(&dep->started_list); if (WARN_ON_ONCE(!req)) return 1; + chain = req->request.num_mapped_sgs > 0; i = 0; do { slot = req->first_trb_index + i; @@ -2049,9 +2067,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, trb = &dep->trb_pool[slot]; count += trb->size & DWC3_TRB_SIZE_MASK; - ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, - event, status); + event, status, chain); if (ret) break; } while (++i < req->request.num_mapped_sgs); -- cgit v0.10.2 From 7c705dfe2ebe731c8fd068623b6b4df2d3512c08 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 10 Aug 2016 12:35:30 +0300 Subject: usb: dwc3: gadget: always cleanup all TRBs If we stop earlier due to short packet, we will not be able to giveback all TRBs. 
Cc: Cc: Brian E Rogers Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 241f5c7..eb820e4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2028,7 +2028,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, s_pkt = 1; } - if (s_pkt) + if (s_pkt && !chain) return 1; if ((event->status & DEPEVT_STATUS_LST) && (trb->ctrl & (DWC3_TRB_CTRL_LST | -- cgit v0.10.2 From 4491ed5042f0419b22a4b08331adb54af31e2caa Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Fri, 1 Apr 2016 17:13:11 +0300 Subject: usb: dwc3: pci: add Intel Kabylake PCI ID Intel Kabylake PCH has the same DWC3 than Intel Sunrisepoint. Add the new ID to the supported devices. Cc: Signed-off-by: Heikki Krogerus Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 45f5a23..2eb84d6 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -37,6 +37,7 @@ #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa +#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; @@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; -- cgit v0.10.2 From a23eadfae2fd45536a355b785d5a1533e1955c22 Mon Sep 17 00:00:00 2001 From: Tommaso Cucinotta Date: Tue, 19 Jul 2016 11:44:50 +0200 Subject: sched/deadline: Fix wrap-around in DL heap Current code in cpudeadline.c has a bug in re-heapifying when adding a new element at the end of the heap, because a deadline value of 0 is temporarily set in the new elem, then cpudl_change_key() is called with the actual elem deadline as param. However, the function compares the new deadline to set with the one previously in the elem, which is 0. So, if current absolute deadlines grew so much to have negative values as s64, the comparison in cpudl_change_key() makes the wrong decision. Instead, as from dl_time_before(), the kernel should handle correctly abs deadlines wrap-arounds. This patch fixes the problem with a minimally invasive change that forces cpudl_change_key() to heapify up in this case. 
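For illustration, a minimal sketch (in the style of the scheduler's dl_time_before() helper, not code from this patch) of a wrap-around-safe deadline comparison:

  /* sketch: true iff deadline a is earlier than b, even across u64 wrap-around */
  static inline bool dl_before(u64 a, u64 b)
  {
          return (s64)(a - b) < 0;
  }

Comparing the real deadline against the temporary 0 key bypasses this wrap-safe logic entirely, which is why the newly added element is seeded with its actual deadline before re-heapifying.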
Signed-off-by: Tommaso Cucinotta Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Luca Abeni Cc: Juri Lelli Cc: Juri Lelli Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1468921493-10054-2-git-send-email-tommaso.cucinotta@sssup.it Signed-off-by: Ingo Molnar diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5be5882..d418449 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid) if (old_idx == IDX_INVALID) { cp->size++; - cp->elements[cp->size - 1].dl = 0; + cp->elements[cp->size - 1].dl = dl; cp->elements[cp->size - 1].cpu = cpu; cp->elements[cpu].idx = cp->size - 1; cpudl_change_key(cp, cp->size - 1, dl); -- cgit v0.10.2 From b8922125e4790fa237a8a4204562ecf457ef54bb Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sat, 9 Jul 2016 15:54:22 +0800 Subject: sched/fair: Fix typo in sync_throttle() We should update cfs_rq->throttled_clock_task, not pcfs_rq->throttle_clock_task. The effects of this bug were probably occasionally erratic group scheduling, particularly in cgroups-intense workloads. Signed-off-by: Xunlei Pang [ Added changelog. ] Signed-off-by: Peter Zijlstra (Intel) Acked-by: Konstantin Khlebnikov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 55e16d30bd99 ("sched/fair: Rework throttle_count sync") Link: http://lkml.kernel.org/r/1468050862-18864-1-git-send-email-xlpang@redhat.com Signed-off-by: Ingo Molnar diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4088eed..039de34 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu) pcfs_rq = tg->parent->cfs_rq[cpu]; cfs_rq->throttle_count = pcfs_rq->throttle_count; - pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); + cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); } /* conditionally throttle active cfs_rq's from put_prev_entity() */ -- cgit v0.10.2 From 6075620b0590eaf22f10ce88833eb20a57f760d6 Mon Sep 17 00:00:00 2001 From: Giovanni Gherdovich Date: Fri, 5 Aug 2016 10:21:56 +0200 Subject: sched/cputime: Mitigate performance regression in times()/clock_gettime() Commit: 6e998916dfe3 ("sched/cputime: Fix clock_nanosleep()/clock_gettime() inconsistency") fixed a problem whereby clock_nanosleep() followed by clock_gettime() could allow a task to wake early. It addressed the problem by calling the scheduling classes update_curr() when the cputimer starts. Said change induced a considerable performance regression on the syscalls times() and clock_gettimes(CLOCK_PROCESS_CPUTIME_ID). There are some debuggers and applications that monitor their own performance that accidentally depend on the performance of these specific calls. This patch mitigates the performance loss by prefetching data in the CPU cache, as stalls due to cache misses appear to be where most time is spent in our benchmarks. Here are the performance gains of this patch over v4.7-rc7 on a Sandy Bridge box with 32 logical cores and 2 NUMA nodes. The test is repeated with a variable number of threads, from 2 to 4*num_cpus; the results are in seconds and correspond to the average of 10 runs; the percentage gain is computed with (before-after)/before so a positive value is an improvement (it's faster). The improvement varies between a few percent for 5-20 threads and more than 10% for 2 or >20 threads.
pound_clock_gettime: threads 4.7-rc7 patched 4.7-rc7 [num] [secs] [secs (percent)] 2 3.48 3.06 ( 11.83%) 5 3.33 3.25 ( 2.40%) 8 3.37 3.26 ( 3.30%) 12 3.32 3.37 ( -1.60%) 21 4.01 3.90 ( 2.74%) 30 3.63 3.36 ( 7.41%) 48 3.71 3.11 ( 16.27%) 79 3.75 3.16 ( 15.74%) 110 3.81 3.25 ( 14.80%) 128 3.88 3.31 ( 14.76%) pound_times: threads 4.7-rc7 patched 4.7-rc7 [num] [secs] [secs (percent)] 2 3.65 3.25 ( 11.03%) 5 3.45 3.17 ( 7.92%) 8 3.52 3.22 ( 8.69%) 12 3.29 3.36 ( -2.04%) 21 4.07 3.92 ( 3.78%) 30 3.87 3.40 ( 12.17%) 48 3.79 3.16 ( 16.61%) 79 3.88 3.28 ( 15.42%) 110 3.90 3.38 ( 13.35%) 128 4.00 3.38 ( 15.45%) pound_clock_gettime and pound_clock_gettime are two benchmarks included in the MMTests framework. They launch a given number of threads which repeatedly call times() or clock_gettimes(). The results above can be reproduced with cloning MMTests from github.com and running the "poundtime" workload: $ git clone https://github.com/gormanm/mmtests.git $ cd mmtests $ cp configs/config-global-dhp__workload_poundtime config $ ./run-mmtests.sh --run-monitor $(uname -r) The above will run "poundtime" measuring the kernel currently running on the machine; Once a new kernel is installed and the machine rebooted, running again $ cd mmtests $ ./run-mmtests.sh --run-monitor $(uname -r) will produce results to compare with. A comparison table will be output with: $ cd mmtests/work/log $ ../../compare-kernels.sh the table will contain a lot of entries; grepping for "Amean" (as in "arithmetic mean") will give the tables presented above. The source code for the two benchmarks is reported at the end of this changelog for clairity. The cache misses addressed by this patch were found using a combination of `perf top`, `perf record` and `perf annotate`. The incriminated lines were found to be struct sched_entity *curr = cfs_rq->curr; and delta_exec = now - curr->exec_start; in the function update_curr() from kernel/sched/fair.c. This patch prefetches the data from memory just before update_curr is called in the interested execution path. 
A comparison of the total number of cycles before and after the patch follows; the data is obtained using `perf stat -r 10 -ddd ` running over the same sequence of number of threads used above (a positive gain is an improvement): threads cycles before cycles after gain 2 19,699,563,964 +-1.19% 17,358,917,517 +-1.85% 11.88% 5 47,401,089,566 +-2.96% 45,103,730,829 +-0.97% 4.85% 8 80,923,501,004 +-3.01% 71,419,385,977 +-0.77% 11.74% 12 112,326,485,473 +-0.47% 110,371,524,403 +-0.47% 1.74% 21 193,455,574,299 +-0.72% 180,120,667,904 +-0.36% 6.89% 30 315,073,519,013 +-1.64% 271,222,225,950 +-1.29% 13.92% 48 321,969,515,332 +-1.48% 273,353,977,321 +-1.16% 15.10% 79 337,866,003,422 +-0.97% 289,462,481,538 +-1.05% 14.33% 110 338,712,691,920 +-0.78% 290,574,233,170 +-0.77% 14.21% 128 348,384,794,006 +-0.50% 292,691,648,206 +-0.66% 15.99% A comparison of cache miss vs total cache loads ratios, before and after the patch (again from the `perf stat -r 10 -ddd ` tables): threads L1 misses/total*100 L1 misses/total*100 gain before after 2 7.43 +-4.90% 7.36 +-4.70% 0.94% 5 13.09 +-4.74% 13.52 +-3.73% -3.28% 8 13.79 +-5.61% 12.90 +-3.27% 6.45% 12 11.57 +-2.44% 8.71 +-1.40% 24.72% 21 12.39 +-3.92% 9.97 +-1.84% 19.53% 30 13.91 +-2.53% 11.73 +-2.28% 15.67% 48 13.71 +-1.59% 12.32 +-1.97% 10.14% 79 14.44 +-0.66% 13.40 +-1.06% 7.20% 110 15.86 +-0.50% 14.46 +-0.59% 8.83% 128 16.51 +-0.32% 15.06 +-0.78% 8.78% As a final note, the following shows the evolution of performance figures in the "poundtime" benchmark and pinpoints commit 6e998916dfe3 ("sched/cputime: Fix clock_nanosleep()/clock_gettime() inconsistency") as a major source of degradation, mostly unaddressed to this day (figures expressed in seconds). pound_clock_gettime: threads parent of 6e998916dfe3 4.7-rc7 6e998916dfe3 itself 2 2.23 3.68 ( -64.56%) 3.48 (-55.48%) 5 2.83 3.78 ( -33.42%) 3.33 (-17.43%) 8 2.84 4.31 ( -52.12%) 3.37 (-18.76%) 12 3.09 3.61 ( -16.74%) 3.32 ( -7.17%) 21 3.14 4.63 ( -47.36%) 4.01 (-27.71%) 30 3.28 5.75 ( -75.37%) 3.63 (-10.80%) 48 3.02 6.05 (-100.56%) 3.71 (-22.99%) 79 2.88 6.30 (-118.90%) 3.75 (-30.26%) 110 2.95 6.46 (-119.00%) 3.81 (-29.24%) 128 3.05 6.42 (-110.08%) 3.88 (-27.04%) pound_times: threads parent of 6e998916dfe3 4.7-rc7 6e998916dfe3 itself 2 2.27 3.73 ( -64.71%) 3.65 (-61.14%) 5 2.78 3.77 ( -35.56%) 3.45 (-23.98%) 8 2.79 4.41 ( -57.71%) 3.52 (-26.05%) 12 3.02 3.56 ( -17.94%) 3.29 ( -9.08%) 21 3.10 4.61 ( -48.74%) 4.07 (-31.34%) 30 3.33 5.75 ( -72.53%) 3.87 (-16.01%) 48 2.96 6.06 (-105.04%) 3.79 (-28.10%) 79 2.88 6.24 (-116.83%) 3.88 (-34.81%) 110 2.98 6.37 (-114.08%) 3.90 (-31.12%) 128 3.10 6.35 (-104.61%) 4.00 (-28.87%) The source code of the two benchmarks follows. 
To compile the two: NR_THREADS=42 for FILE in pound_times pound_clock_gettime; do gcc -lrt -O2 -lpthread -DNUM_THREADS=$NR_THREADS $FILE.c -o $FILE done ==== BEGIN pound_times.c ==== struct tms start; void *pound (void *threadid) { struct tms end; int oldutime = 0; int utime; int i; for (i = 0; i < 5000000 / NUM_THREADS; i++) { times(&end); utime = ((int)end.tms_utime - (int)start.tms_utime); if (oldutime > utime) { printf("utime decreased, was %d, now %d!\n", oldutime, utime); } oldutime = utime; } pthread_exit(NULL); } int main() { pthread_t th[NUM_THREADS]; long i; times(&start); for (i = 0; i < NUM_THREADS; i++) { pthread_create (&th[i], NULL, pound, (void *)i); } pthread_exit(NULL); return 0; } ==== END pound_times.c ==== ==== BEGIN pound_clock_gettime.c ==== void *pound (void *threadid) { struct timespec ts; int rc, i; unsigned long prev = 0, this = 0; for (i = 0; i < 5000000 / NUM_THREADS; i++) { rc = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts); if (rc < 0) perror("clock_gettime"); this = (ts.tv_sec * 1000000000) + ts.tv_nsec; if (0 && this < prev) printf("%lu ns timewarp at iteration %d\n", prev - this, i); prev = this; } pthread_exit(NULL); } int main() { pthread_t th[NUM_THREADS]; long rc, i; pid_t pgid; for (i = 0; i < NUM_THREADS; i++) { rc = pthread_create(&th[i], NULL, pound, (void *)i); if (rc < 0) perror("pthread_create"); } pthread_exit(NULL); return 0; } ==== END pound_clock_gettime.c ==== Suggested-by: Mike Galbraith Signed-off-by: Giovanni Gherdovich Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Stanislaw Gruszka Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1470385316-15027-2-git-send-email-ggherdovich@suse.cz Signed-off-by: Ingo Molnar diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5c883fe..2a906f2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -74,6 +74,7 @@ #include #include #include +#include #include #include @@ -2972,6 +2973,23 @@ EXPORT_PER_CPU_SYMBOL(kstat); EXPORT_PER_CPU_SYMBOL(kernel_cpustat); /* + * The function fair_sched_class.update_curr accesses the struct curr + * and its field curr->exec_start; when called from task_sched_runtime(), + * we observe a high rate of cache misses in practice. + * Prefetching this data results in improved performance. + */ +static inline void prefetch_curr_exec_start(struct task_struct *p) +{ +#ifdef CONFIG_FAIR_GROUP_SCHED + struct sched_entity *curr = (&p->se)->cfs_rq->curr; +#else + struct sched_entity *curr = (&task_rq(p)->cfs)->curr; +#endif + prefetch(curr); + prefetch(&curr->exec_start); +} + +/* * Return accounted runtime for the task. * In case the task is currently running, return the runtime plus current's * pending runtime that have not been accounted yet. @@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) * thread, breaking clock_gettime(). */ if (task_current(rq, p) && task_on_rq_queued(p)) { + prefetch_curr_exec_start(p); update_rq_clock(rq); p->sched_class->update_curr(rq); } -- cgit v0.10.2 From c0c8c9fa210c9a042060435f17e40ba4a76d6d6f Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 4 Aug 2016 09:42:20 +0800 Subject: sched/deadline: Fix lock pinning warning during CPU hotplug The following warning can be triggered by hot-unplugging the CPU on which an active SCHED_DEADLINE task is running on: WARNING: CPU: 0 PID: 0 at kernel/locking/lockdep.c:3531 lock_release+0x690/0x6a0 releasing a pinned lock Call Trace: dump_stack+0x99/0xd0 __warn+0xd1/0xf0 ? 
dl_task_timer+0x1a1/0x2b0 warn_slowpath_fmt+0x4f/0x60 ? sched_clock+0x13/0x20 lock_release+0x690/0x6a0 ? enqueue_pushable_dl_task+0x9b/0xa0 ? enqueue_task_dl+0x1ca/0x480 _raw_spin_unlock+0x1f/0x40 dl_task_timer+0x1a1/0x2b0 ? push_dl_task.part.31+0x190/0x190 WARNING: CPU: 0 PID: 0 at kernel/locking/lockdep.c:3649 lock_unpin_lock+0x181/0x1a0 unpinning an unpinned lock Call Trace: dump_stack+0x99/0xd0 __warn+0xd1/0xf0 warn_slowpath_fmt+0x4f/0x60 lock_unpin_lock+0x181/0x1a0 dl_task_timer+0x127/0x2b0 ? push_dl_task.part.31+0x190/0x190 As per the comment before this code, its safe to drop the RQ lock here, and since we (potentially) change rq, unpin and repin to avoid the splat. Signed-off-by: Wanpeng Li [ Rewrote changelog. ] Signed-off-by: Peter Zijlstra (Intel) Cc: Juri Lelli Cc: Linus Torvalds Cc: Luca Abeni Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1470274940-17976-1-git-send-email-wanpeng.li@hotmail.com Signed-off-by: Ingo Molnar diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index fcb7f02..1ce8867 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) * * XXX figure out if select_task_rq_dl() deals with offline cpus. */ - if (unlikely(!rq->online)) + if (unlikely(!rq->online)) { + lockdep_unpin_lock(&rq->lock, rf.cookie); rq = dl_task_offline_migration(rq, p); + rf.cookie = lockdep_pin_lock(&rq->lock); + } /* * Queueing this task back might have overloaded rq, check if we need -- cgit v0.10.2 From 2db34e8bf9a22f4e38b29deccee57457bc0e7d74 Mon Sep 17 00:00:00 2001 From: pan xinhui Date: Mon, 18 Jul 2016 17:47:39 +0800 Subject: locking/qrwlock: Fix write unlock bug on big endian systems This patch aims to get rid of endianness in queued_write_unlock(). We want to set __qrwlock->wmode to NULL, however the address is not &lock->cnts in big endian machine. That causes queued_write_unlock() write NULL to the wrong field of __qrwlock. So implement __qrwlock_write_byte() which returns the correct __qrwlock->wmode address. Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Pan Xinhui Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman.Long@hpe.com Cc: arnd@arndb.de Cc: boqun.feng@gmail.com Cc: will.deacon@arm.com Link: http://lkml.kernel.org/r/1468835259-4486-1-git-send-email-xinhui.pan@linux.vnet.ibm.com Signed-off-by: Ingo Molnar diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 54a8e65..7d026bf 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -25,7 +25,20 @@ #include /* - * Writer states & reader shift and bias + * Writer states & reader shift and bias. 
+ * + * | +0 | +1 | +2 | +3 | + * ----+----+----+----+----+ + * LE | 78 | 56 | 34 | 12 | 0x12345678 + * ----+----+----+----+----+ + * | wr | rd | + * +----+----+----+----+ + * + * ----+----+----+----+----+ + * BE | 12 | 34 | 56 | 78 | 0x12345678 + * ----+----+----+----+----+ + * | rd | wr | + * +----+----+----+----+ */ #define _QW_WAITING 1 /* A writer is waiting */ #define _QW_LOCKED 0xff /* A writer holds the lock */ @@ -134,12 +147,22 @@ static inline void queued_read_unlock(struct qrwlock *lock) } /** + * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: the write byte address of a queue rwlock + */ +static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) +{ + return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); +} + +/** * queued_write_unlock - release write lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_write_unlock(struct qrwlock *lock) { - smp_store_release((u8 *)&lock->cnts, 0); + smp_store_release(__qrwlock_write_byte(lock), 0); } /* -- cgit v0.10.2 From 229ce631574761870a2ac938845fadbd07f35caa Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 14 Jul 2016 16:15:56 +0800 Subject: locking/pvqspinlock: Fix double hash race When the lock holder vCPU is racing with the queue head: CPU 0 (lock holder) CPU1 (queue head) =================== ================= spin_lock(); spin_lock(); pv_kick_node(): pv_wait_head_or_lock(): if (!lp) { lp = pv_hash(lock, pn); xchg(&l->locked, _Q_SLOW_VAL); } WRITE_ONCE(pn->state, vcpu_halted); cmpxchg(&pn->state, vcpu_halted, vcpu_hashed); WRITE_ONCE(l->locked, _Q_SLOW_VAL); (void)pv_hash(lock, pn); In this case, lock holder inserts the pv_node of queue head into the hash table and set _Q_SLOW_VAL unnecessary. This patch avoids it by restoring/setting vcpu_hashed state after failing adaptive locking spinning. Signed-off-by: Wanpeng Li Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Pan Xinhui Cc: Andrew Morton Cc: Davidlohr Bueso Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Link: http://lkml.kernel.org/r/1468484156-4521-1-git-send-email-wanpeng.li@hotmail.com Signed-off-by: Ingo Molnar diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 37649e6..8a99abf 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) goto gotlock; } } - WRITE_ONCE(pn->state, vcpu_halted); + WRITE_ONCE(pn->state, vcpu_hashed); qstat_inc(qstat_pv_wait_head, true); qstat_inc(qstat_pv_wait_again, waitcnt); pv_wait(&l->locked, _Q_SLOW_VAL); -- cgit v0.10.2 From c2ace36b884de9330c4149064ae8d212d2e0d9ee Mon Sep 17 00:00:00 2001 From: Pan Xinhui Date: Wed, 13 Jul 2016 18:23:34 +0800 Subject: locking/pvqspinlock: Fix a bug in qstat_read() It's obviously wrong to set stat to NULL. So lets remove it. Otherwise it is always zero when we check the latency of kick/wake. Signed-off-by: Pan Xinhui Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Waiman Long Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1468405414-3700-1-git-send-email-xinhui.pan@linux.vnet.ibm.com Signed-off-by: Ingo Molnar diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index 22e0253..b9d0315 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf, */ if ((counter == qstat_pv_latency_kick) || (counter == qstat_pv_latency_wake)) { - stat = 0; if (kicks) stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); } -- cgit v0.10.2 From 469f00231278da68062a809306df0bac95a27507 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Fri, 15 Jul 2016 11:42:43 +0200 Subject: x86, kasan, ftrace: Put APIC interrupt handlers into .irqentry.text Dmitry Vyukov has reported unexpected KASAN stackdepot growth: https://github.com/google/kasan/issues/36 ... which is caused by the APIC handlers not being present in .irqentry.text: When building with CONFIG_FUNCTION_GRAPH_TRACER=y or CONFIG_KASAN=y, put the APIC interrupt handlers into the .irqentry.text section. This is needed because both KASAN and function graph tracer use __irqentry_text_start and __irqentry_text_end to determine whether a function is an IRQ entry point. Reported-by: Dmitry Vyukov Signed-off-by: Alexander Potapenko Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: aryabinin@virtuozzo.com Cc: kasan-dev@googlegroups.com Cc: kcc@google.com Cc: rostedt@goodmis.org Link: http://lkml.kernel.org/r/1468575763-144889-1-git-send-email-glider@google.com [ Minor edits. ] Signed-off-by: Ingo Molnar diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index b846875..9f85827 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -601,9 +601,20 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym) .endm #endif +/* Make sure APIC interrupt handlers end up in the irqentry section: */ +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) +# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax" +# define POP_SECTION_IRQENTRY .popsection +#else +# define PUSH_SECTION_IRQENTRY +# define POP_SECTION_IRQENTRY +#endif + .macro apicinterrupt num sym do_sym +PUSH_SECTION_IRQENTRY apicinterrupt3 \num \sym \do_sym trace_apicinterrupt \num \sym +POP_SECTION_IRQENTRY .endm #ifdef CONFIG_SMP -- cgit v0.10.2 From 22cc1ca3c5469cf17e149be232817b9223afa5e4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 Aug 2016 21:54:53 +0200 Subject: x86/hpet: Fix /dev/rtc breakage caused by RTC cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ville Syrjälä reports "The first time I run hwclock after rebooting I get this: open("/dev/rtc", O_RDONLY) = 3 ioctl(3, PHN_SET_REGS or RTC_UIE_ON, 0) = 0 select(4, [3], NULL, NULL, {10, 0}) = 0 (Timeout) ioctl(3, PHN_NOT_OH or RTC_UIE_OFF, 0) = 0 close(3) = 0 On all subsequent runs I get this: open("/dev/rtc", O_RDONLY) = 3 ioctl(3, PHN_SET_REGS or RTC_UIE_ON, 0) = -1 EINVAL (Invalid argument) ioctl(3, RTC_RD_TIME, 0x7ffd76b3ae70) = -1 EINVAL (Invalid argument) close(3) = 0" This was caused by a stupid typo in a patch that should have been a simple rename to move around contents of a header file, but accidentally wrote zeroes into the rtc rather than reading from it: 463a86304cae ("char/genrtc: x86: remove remnants of asm/rtc.h") 
Reported-by: Ville Syrjälä Tested-by: Jarkko Nikula Tested-by: Ville Syrjälä Signed-off-by: Arnd Bergmann Cc: Alessandro Zummo Cc: Alexandre Belloni Cc: Borislav Petkov Cc: Geert Uytterhoeven Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rtc-linux@googlegroups.com Fixes: 463a86304cae ("char/genrtc: x86: remove remnants of asm/rtc.h") Link: http://lkml.kernel.org/r/20160809195528.1604312-1-arnd@arndb.de Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ed16e58..c6dfd80 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -1242,7 +1242,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) memset(&curr_time, 0, sizeof(struct rtc_time)); if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) - mc146818_set_time(&curr_time); + mc146818_get_time(&curr_time); if (hpet_rtc_flags & RTC_UIE && curr_time.tm_sec != hpet_prev_update_sec) { -- cgit v0.10.2 From c7d2361f7524f365c1ae42f47880e3fa9efb2c2a Mon Sep 17 00:00:00 2001 From: Thomas Garnier Date: Tue, 9 Aug 2016 10:11:04 -0700 Subject: x86/mm/KASLR: Fix physical memory calculation on KASLR memory randomization Initialize KASLR memory randomization after max_pfn is initialized. Also ensure the size is rounded up. It could create problems on machines with more than 1Tb of memory on certain random addresses. Signed-off-by: Thomas Garnier Cc: Aleksey Makarov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Baoquan He Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Christian Borntraeger Cc: Dan Williams Cc: Dave Hansen Cc: Dave Young Cc: Denys Vlasenko Cc: Fabian Frederick Cc: H. Peter Anvin Cc: Joerg Roedel Cc: Josh Poimboeuf Cc: Kees Cook Cc: Linus Torvalds Cc: Lv Zheng Cc: Mark Salter Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: Toshi Kani Cc: kernel-hardening@lists.openwall.com Fixes: 021182e52fe0 ("Enable KASLR for physical mapping memory regions") Link: http://lkml.kernel.org/r/1470762665-88032-1-git-send-email-thgarnie@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 991b779..95cf31c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -936,8 +936,6 @@ void __init setup_arch(char **cmdline_p) x86_init.oem.arch_setup(); - kernel_randomize_memory(); - iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; setup_memory_map(); parse_setup_data(); @@ -1055,6 +1053,12 @@ void __init setup_arch(char **cmdline_p) max_possible_pfn = max_pfn; + /* + * Define random base addresses for memory sections after max_pfn is + * defined and before each memory section base is used. + */ + kernel_randomize_memory(); + #ifdef CONFIG_X86_32 /* max_low_pfn get updated here */ find_low_pfn_range(); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 26dccd6..ec8654f 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -97,7 +97,7 @@ void __init kernel_randomize_memory(void) * add padding if needed (especially for memory hotplug support). 
*/ BUG_ON(kaslr_regions[0].base != &page_offset_base); - memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) + + memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; /* Adapt phyiscal memory region size based on available memory */ -- cgit v0.10.2 From fb754f958f8e46202c1efd7f66d5b3db1208117d Mon Sep 17 00:00:00 2001 From: Thomas Garnier Date: Tue, 9 Aug 2016 10:11:05 -0700 Subject: x86/mm/KASLR: Increase BRK pages for KASLR memory randomization Default implementation expects 6 pages maximum are needed for low page allocations. If KASLR memory randomization is enabled, the worse case of e820 layout would require 12 pages (no large pages). It is due to the PUD level randomization and the variable e820 memory layout. This bug was found while doing extensive testing of KASLR memory randomization on different type of hardware. Signed-off-by: Thomas Garnier Cc: Aleksey Makarov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Baoquan He Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Christian Borntraeger Cc: Dan Williams Cc: Dave Hansen Cc: Dave Young Cc: Denys Vlasenko Cc: Fabian Frederick Cc: H. Peter Anvin Cc: Joerg Roedel Cc: Josh Poimboeuf Cc: Kees Cook Cc: Linus Torvalds Cc: Lv Zheng Cc: Mark Salter Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: Toshi Kani Cc: kernel-hardening@lists.openwall.com Fixes: 021182e52fe0 ("Enable KASLR for physical mapping memory regions") Link: http://lkml.kernel.org/r/1470762665-88032-2-git-send-email-thgarnie@google.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 6209289..d28a2d7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -122,8 +122,18 @@ __ref void *alloc_low_pages(unsigned int num) return __va(pfn << PAGE_SHIFT); } -/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */ -#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE) +/* + * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS. + * With KASLR memory randomization, depending on the machine e820 memory + * and the PUD alignment. We may need twice more pages when KASLR memory + * randomization is enabled. + */ +#ifndef CONFIG_RANDOMIZE_MEMORY +#define INIT_PGD_PAGE_COUNT 6 +#else +#define INIT_PGD_PAGE_COUNT 12 +#endif +#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE) RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); void __init early_alloc_pgt_buf(void) { -- cgit v0.10.2 From 164af597ce945751e2dcd53d0a86e84203a6d117 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 9 Aug 2016 22:43:46 +1000 Subject: powerpc/Makefile: Use cflags-y/aflags-y for setting endian options When we introduced the little endian support, we added the endian flags to CC directly using override. I don't know the history of why we did that, I suspect no one does. Although this mostly works, it has one bug, which is that CROSS32CC doesn't get -mbig-endian. That means when the compiler is little endian by default and the user is building big endian, vdso32 is incorrectly compiled as little endian and the kernel fails to build. Instead we can add the endian flags to cflags-y/aflags-y, and then append those to KBUILD_CFLAGS/KBUILD_AFLAGS. This has the advantage of being 1) less ugly, 2) the documented way of adding flags in the arch Makefile and 3) it fixes building vdso32 with a LE toolchain. 
Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index ca25454..1934707 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -66,29 +66,28 @@ endif UTS_MACHINE := $(OLDARCH) ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) -override CC += -mlittle-endian -ifneq ($(cc-name),clang) -override CC += -mno-strict-align -endif -override AS += -mlittle-endian override LD += -EL -override CROSS32CC += -mlittle-endian override CROSS32AS += -mlittle-endian LDEMULATION := lppc GNUTARGET := powerpcle MULTIPLEWORD := -mno-multiple KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) else -ifeq ($(call cc-option-yn,-mbig-endian),y) -override CC += -mbig-endian -override AS += -mbig-endian -endif override LD += -EB LDEMULATION := ppc GNUTARGET := powerpc MULTIPLEWORD := -mmultiple endif +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) +cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian +ifneq ($(cc-name),clang) + cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align +endif + +aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) +aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian + ifeq ($(HAS_BIARCH),y) override AS += -a$(CONFIG_WORD_SIZE) override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION) @@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200) += -Wa,-me200 KBUILD_AFLAGS += $(cpu-as-y) KBUILD_CFLAGS += $(cpu-as-y) +KBUILD_AFLAGS += $(aflags-y) +KBUILD_CFLAGS += $(cflags-y) + head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o -- cgit v0.10.2 From b9a4a0d02c5b8d9a1397c11d741d2a1a56381178 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Aug 2016 22:17:29 +1000 Subject: powerpc/vdso: Fix build rules to rebuild vdsos correctly When using if_changed, we need to add FORCE as a dependency (see Documentation/kbuild/makefiles.txt) otherwise we don't get command line change checking amongst other things. This has resulted in vdsos not being rebuilt when switching between big and little endian. The vdso64/32ld commands have to be changed around to avoid pulling FORCE into the linker command line (code copied from x86). 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index cbabd14..78a7449 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) +$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE $(call if_changed,vdso32ld) # strip rule for the .so file @@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) # assembly rules for the .S files -$(obj-vdso32): %.o: %.S +$(obj-vdso32): %.o: %.S FORCE $(call if_changed_dep,vdso32as) # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index c710802..366ae09 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) +$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE $(call if_changed,vdso64ld) # strip rule for the .so file @@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) # assembly rules for the .S files -$(obj-vdso64): %.o: %.S +$(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< -- cgit v0.10.2 From ca49e64f0cb1368fc666a53b16b45d4505763d9c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 29 Jul 2016 20:48:09 +1000 Subject: selftests/powerpc: Specify we expect to build with std=gnu99 We have some tests that assume we're using std=gnu99, which is fine on most compilers, but some old compilers use a different default. So make it explicit that we want to use std=gnu99. 
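For illustration only (this example is not part of the patch; the file and symbol names are invented): the kind of construct that a compiler defaulting to gnu89 rejects, but -std=gnu99 accepts, is a declaration inside a for statement. A minimal standalone sketch:

  /* c99_for.c: invented example -- build with "gcc -std=gnu99 -Wall c99_for.c".
   * A compiler that defaults to gnu89 rejects the declaration in the for
   * statement below, which is the sort of failure the explicit -std=gnu99 avoids.
   */
  #include <stdio.h>

  int main(void)
  {
          for (int i = 0; i < 4; i++)     /* C99/gnu99: declaration inside the for statement */
                  printf("token %d\n", i);
          return 0;
  }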
Signed-off-by: Michael Ellerman diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 3c40c9d..1cc6d64 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc) GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown") -CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) +CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) export CFLAGS -- cgit v0.10.2 From 5cf0791da5c162ebc14b01eb01631cfa7ed4fa6e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 5 Aug 2016 15:37:39 +0200 Subject: x86/mm: Disable preemption during CR3 read+write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's a subtle preemption race on UP kernels: Usually current->mm (and therefore mm->pgd) stays the same during the lifetime of a task so it does not matter if a task gets preempted during the read and write of the CR3. But then, there is this scenario on x86-UP: TaskA is in do_exit() and exit_mm() sets current->mm = NULL followed by: -> mmput() -> exit_mmap() -> tlb_finish_mmu() -> tlb_flush_mmu() -> tlb_flush_mmu_tlbonly() -> tlb_flush() -> flush_tlb_mm_range() -> __flush_tlb_up() -> __flush_tlb() -> __native_flush_tlb() At this point current->mm is NULL but current->active_mm still points to the "old" mm. Let's preempt taskA _after_ native_read_cr3() by taskB. TaskB has its own mm so CR3 has changed. Now preempt back to taskA. TaskA has no ->mm set so it borrows taskB's mm and so CR3 remains unchanged. Once taskA gets active it continues where it was interrupted and that means it writes its old CR3 value back. Everything is fine because userland won't need its memory anymore. Now the fun part: Let's preempt taskA one more time and get back to taskB. This time switch_mm() won't do a thing because oldmm (->active_mm) is the same as mm (as per context_switch()). So we remain with a bad CR3 / PGD and return to userland. The next thing that happens is handle_mm_fault() with an address for the execution of its code in userland. handle_mm_fault() realizes that it has a PTE with proper rights so it returns doing nothing. But the CPU looks at the wrong PGD and insists that something is wrong and faults again. And again. And one more time… This pagefault circle continues until the scheduler gets tired of it and puts another task on the CPU. It gets little difficult if the task is a RT task with a high priority. The system will either freeze or it gets fixed by the software watchdog thread which usually runs at RT-max prio. But waiting for the watchdog will increase the latency of the RT task which is no good. Fix this by disabling preemption across the critical code section. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Acked-by: Rik van Riel Acked-by: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1470404259-26290-1-git-send-email-bigeasy@linutronix.de [ Prettified the changelog. 
] Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 4e5be94..6fa8594 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -135,7 +135,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) static inline void __native_flush_tlb(void) { + /* + * If current->mm == NULL then we borrow a mm which may change during a + * task switch and therefore we must not be preempted while we write CR3 + * back: + */ + preempt_disable(); native_write_cr3(native_read_cr3()); + preempt_enable(); } static inline void __native_flush_tlb_global_irq_disabled(void) -- cgit v0.10.2 From 5b236d0fde21d88351420ef0b9a6cb7aeeea0c54 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 26 Jul 2016 14:51:58 +0000 Subject: pinctrl: meson: Drop pinctrl_unregister for devm_ registered device It's not necessary to unregister pin controller device registered with devm_pinctrl_register() and using pinctrl_unregister() leads to a double free. This is detected by Coccinelle semantic patch. Fixes: e649f7ec8c5f ("pinctrl: meson: Use devm_pinctrl_register() for pinctrl registration") Signed-off-by: Wei Yongjun Reviewed-by: Dmitry Torokhov Acked-by: Kevin Hilman Signed-off-by: Linus Walleij diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 11623c6..44e69c9 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(pc->pcdev); } - ret = meson_gpiolib_register(pc); - if (ret) { - pinctrl_unregister(pc->pcdev); - return ret; - } - - return 0; + return meson_gpiolib_register(pc); } static struct platform_driver meson_pinctrl_driver = { -- cgit v0.10.2 From b120a3c286520ca465c54e8afa442be10560053b Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 26 Jul 2016 14:52:57 +0000 Subject: pinctrl: pistachio: Drop pinctrl_unregister for devm_ registered device It's not necessary to unregister pin controller device registered with devm_pinctrl_register() and using pinctrl_unregister() leads to a double free. This is detected by Coccinelle semantic patch. Signed-off-by: Wei Yongjun Signed-off-by: Linus Walleij diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index c6d410e..7bad200 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) { struct pistachio_pinctrl *pctl; struct resource *res; - int ret; pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); if (!pctl) @@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(pctl->pctldev); } - ret = pistachio_gpio_register(pctl); - if (ret < 0) { - pinctrl_unregister(pctl->pctldev); - return ret; - } - - return 0; + return pistachio_gpio_register(pctl); } static struct platform_driver pistachio_pinctrl_driver = { -- cgit v0.10.2 From 8cf4345575a416e6856a6856ac6eaa31ad883126 Mon Sep 17 00:00:00 2001 From: "Agrawal, Nitesh-kumar" Date: Tue, 26 Jul 2016 08:28:19 +0000 Subject: pinctrl/amd: Remove the default de-bounce time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the function amd_gpio_irq_enable() and amd_gpio_direction_input(), remove the code which is setting the default de-bounce time to 2.75ms. 
The driver code shall use the same settings as specified in BIOS. Any default assignment impacts TouchPad behaviour when the LevelTrig is set to EDGE FALLING. Cc: stable@vger.kernel.org Reviewed-by:  Ken Xue Signed-off-by: Nitesh Kumar Agrawal Signed-off-by: Linus Walleij diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 634b4d3..b3e7723 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset) spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + offset * 4); - /* - * Suppose BIOS or Bootloader sets specific debounce for the - * GPIO. if not, set debounce to be 2.75ms and remove glitch. - */ - if ((pin_reg & DB_TMR_OUT_MASK) == 0) { - pin_reg |= 0xf; - pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; - pin_reg &= ~BIT(DB_TMR_LARGE_OFF); - } - pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); writel(pin_reg, gpio_dev->base + offset * 4); spin_unlock_irqrestore(&gpio_dev->lock, flags); @@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d) spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + (d->hwirq)*4); - /* - Suppose BIOS or Bootloader sets specific debounce for the - GPIO. if not, set debounce to be 2.75ms. - */ - if ((pin_reg & DB_TMR_OUT_MASK) == 0) { - pin_reg |= 0xf; - pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); - pin_reg &= ~BIT(DB_TMR_LARGE_OFF); - } pin_reg |= BIT(INTERRUPT_ENABLE_OFF); pin_reg |= BIT(INTERRUPT_MASK_OFF); writel(pin_reg, gpio_dev->base + (d->hwirq)*4); -- cgit v0.10.2 From e95d0dfb229fffe96dc4c29054f6c7a7302e111e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 2 Aug 2016 18:18:33 +0300 Subject: pinctrl: intel: merrifield: Add missed header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On x86 builds the absense of makes static analyzer and compiler unhappy which fails to build the driver. CHECK drivers/pinctrl/intel/pinctrl-merrifield.c drivers/pinctrl/intel/pinctrl-merrifield.c:518:17: error: undefined identifier 'readl' drivers/pinctrl/intel/pinctrl-merrifield.c:570:17: error: undefined identifier 'readl' drivers/pinctrl/intel/pinctrl-merrifield.c:575:9: error: undefined identifier 'writel' drivers/pinctrl/intel/pinctrl-merrifield.c:645:17: error: undefined identifier 'readl' CC drivers/pinctrl/intel/pinctrl-merrifield.o drivers/pinctrl/intel/pinctrl-merrifield.c: In function ‘mrfld_pin_dbg_show’: drivers/pinctrl/intel/pinctrl-merrifield.c:518:10: error: implicit declaration of function ‘readl’ [-Werror=implicit-function-declaration] value = readl(bufcfg); ^ drivers/pinctrl/intel/pinctrl-merrifield.c: In function ‘mrfld_update_bufcfg’: drivers/pinctrl/intel/pinctrl-merrifield.c:575:2: error: implicit declaration of function ‘writel’ [-Werror=implicit-function-declaration] writel(value, bufcfg); ^ cc1: some warnings being treated as errors Add header to the top of the module. 
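As a side note (not from the patch itself): the failure mode above is the usual implicit-declaration error, a call to a function whose declaring header is not in scope. A tiny userspace analogue of the same mechanism, kept self-contained by using a standard libc function:

  /* implicit_decl.c: invented example -- build with
   * "gcc -Wall -Werror=implicit-function-declaration -c implicit_decl.c".
   * Remove the #include and the call to tolower() becomes an implicit
   * declaration and the build fails, which is the same mechanism that makes
   * readl()/writel() error out when their header is missing.
   */
  #include <ctype.h>      /* declares tolower() */

  int ascii_lower(int c)
  {
          return tolower(c);
  }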
Fixes: 4e80c8f50574 ("pinctrl: intel: Add Intel Merrifield pin controller support") Signed-off-by: Andy Shevchenko Signed-off-by: Linus Walleij diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index eb4990f..7fb7656 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include -- cgit v0.10.2 From 3e035305875cfa8a58c1ca573d0cfa6a7f201f27 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 3 Aug 2016 19:14:29 +0200 Subject: x86/entry: Clarify the RF saving/restoring situation with SYSCALL/SYSRET Clarify why exactly RF cannot be restored properly by SYSRET to avoid confusion. No functionality change. Signed-off-by: Borislav Petkov Acked-by: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160803171429.GA2590@nazgul.tnic Signed-off-by: Ingo Molnar diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 9f85827..d172c61 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -288,11 +288,15 @@ return_from_SYSCALL_64: jne opportunistic_sysret_failed /* - * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET, - * restoring TF results in a trap from userspace immediately after - * SYSRET. This would cause an infinite loop whenever #DB happens - * with register state that satisfies the opportunistic SYSRET - * conditions. For example, single-stepping this user code: + * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot + * restore RF properly. If the slowpath sets it for whatever reason, we + * need to restore it correctly. + * + * SYSRET can restore TF, but unlike IRET, restoring TF results in a + * trap from userspace immediately after SYSRET. This would cause an + * infinite loop whenever #DB happens with register state that satisfies + * the opportunistic SYSRET conditions. For example, single-stepping + * this user code: * * movq $stuck_here, %rcx * pushfq -- cgit v0.10.2 From 054f621fd5b1c7245710f5d3935c94ce6ae4b3b7 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 1 Aug 2016 13:40:50 -0500 Subject: x86/platform/UV: Fix problem with UV4 Socket IDs not being contiguous The UV4 Socket IDs are not guaranteed to equate to Node values which can cause the GAM (Global Addressable Memory) table lookups to fail. Fix this by using an independent index into the GAM table instead of the Socket ID to reference the base address. 
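The indexing idea in isolation (a simplified, standalone sketch with invented names, not the UV code): when hardware IDs may be sparse, hand out table slots in discovery order and keep a separate ID-to-slot map, rather than indexing the table by the raw ID:

  /* sparse_ids.c: invented example -- build with "gcc -std=gnu99 -Wall sparse_ids.c". */
  #include <stdio.h>

  #define MAX_ID 64                               /* highest hardware ID we might see */

  int main(void)
  {
          const int discovered[] = { 0, 4, 5, 9 };  /* sparse IDs, in discovery order */
          int id_to_slot[MAX_ID];
          int slots = 0;

          for (int i = 0; i < MAX_ID; i++)
                  id_to_slot[i] = -1;             /* -1 means "no slot assigned" */

          /* Assign dense slots in discovery order; indexing a table by the raw
           * (possibly sparse) ID would turn gaps into wasted or bogus entries. */
          for (int i = 0; i < (int)(sizeof(discovered) / sizeof(discovered[0])); i++)
                  if (id_to_slot[discovered[i]] < 0)
                          id_to_slot[discovered[i]] = slots++;

          for (int i = 0; i < (int)(sizeof(discovered) / sizeof(discovered[0])); i++)
                  printf("id %d -> slot %d\n", discovered[i], id_to_slot[discovered[i]]);
          return 0;
  }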
Tested-by: Frank Ramsay Tested-by: John Estabrook Signed-off-by: Mike Travis Reviewed-by: Dimitri Sivanich Reviewed-by: Nathan Zimmer Cc: Alex Thorlton Cc: Andrew Banman Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160801184050.048755337@asylum.americas.sgi.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 09b59ad..d918733 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -325,7 +325,7 @@ static __init void build_uv_gr_table(void) struct uv_gam_range_entry *gre = uv_gre_table; struct uv_gam_range_s *grt; unsigned long last_limit = 0, ram_limit = 0; - int bytes, i, sid, lsid = -1; + int bytes, i, sid, lsid = -1, indx = 0, lindx = -1; if (!gre) return; @@ -356,11 +356,12 @@ static __init void build_uv_gr_table(void) } sid = gre->sockid - _min_socket; if (lsid < sid) { /* new range */ - grt = &_gr_table[sid]; - grt->base = lsid; + grt = &_gr_table[indx]; + grt->base = lindx; grt->nasid = gre->nasid; grt->limit = last_limit = gre->limit; lsid = sid; + lindx = indx++; continue; } if (lsid == sid && !ram_limit) { /* update range */ @@ -371,7 +372,7 @@ static __init void build_uv_gr_table(void) } if (!ram_limit) { /* non-contiguous ram range */ grt++; - grt->base = sid - 1; + grt->base = lindx; grt->nasid = gre->nasid; grt->limit = last_limit = gre->limit; continue; -- cgit v0.10.2 From e363d24c2b997c421476c6aa00547edadf678efe Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 1 Aug 2016 13:40:51 -0500 Subject: x86/platform/UV: Fix bug with iounmap() of the UV4 EFI System Table causing a crash Save the uv_systab::size field before doing the iounmap() of the struct pointer, to avoid a NULL dereference crash. Tested-by: Frank Ramsay Tested-by: John Estabrook Signed-off-by: Mike Travis Reviewed-by: Dimitri Sivanich Reviewed-by: Nathan Zimmer Cc: Alex Thorlton Cc: Andrew Banman Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160801184050.250424783@asylum.americas.sgi.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 66b2166..4e9fd13 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -199,12 +199,14 @@ void uv_bios_init(void) return; } + /* Starting with UV4 the UV systab size is variable */ if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) { + int size = uv_systab->size; + iounmap(uv_systab); - uv_systab = ioremap(efi.uv_systab, uv_systab->size); + uv_systab = ioremap(efi.uv_systab, size); if (!uv_systab) { - pr_err("UV: UVsystab: ioremap(%d) failed!\n", - uv_systab->size); + pr_err("UV: UVsystab: ioremap(%d) failed!\n", size); return; } } -- cgit v0.10.2 From 22ac2bca92f2d92c6495248d65ff648182df428d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 1 Aug 2016 13:40:52 -0500 Subject: x86/platform/UV: Fix problem with UV4 BIOS providing incorrect PXM values There are some circumstances where the UV4 BIOS cannot provide the correct Proximity Node values to associate with specific Sockets and Physical Nodes. The decision was made to remove these values from BIOS and for the kernel to get these values from the standard ACPI tables. 
Tested-by: Frank Ramsay Tested-by: John Estabrook Signed-off-by: Mike Travis Reviewed-by: Dimitri Sivanich Reviewed-by: Nathan Zimmer Cc: Alex Thorlton Cc: Andrew Banman Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160801184050.414210079@asylum.americas.sgi.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index c852590..e652a7c 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -79,7 +79,7 @@ struct uv_gam_range_entry { u16 nasid; /* HNasid */ u16 sockid; /* Socket ID, high bits of APIC ID */ u16 pnode; /* Index to MMR and GRU spaces */ - u32 pxm; /* ACPI proximity domain number */ + u32 unused2; u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */ }; @@ -88,7 +88,8 @@ struct uv_gam_range_entry { #define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */ #define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */ #define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */ -#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_2 +#define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */ +#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3 #define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */ #define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */ diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index d918733..6aa0545 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1156,19 +1156,18 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { if (!index) { pr_info("UV: GAM Range Table...\n"); - pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s %3s\n", + pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", - "SID", "PN", "PXM"); + "SID", "PN"); } pr_info( - "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x %3d\n", + "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, ((unsigned long)(gre->limit - lgre)) >> (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ - gre->type, gre->nasid, gre->sockid, - gre->pnode, gre->pxm); + gre->type, gre->nasid, gre->sockid, gre->pnode); lgre = gre->limit; if (sock_min > gre->sockid) @@ -1287,7 +1286,7 @@ static void __init build_socket_tables(void) _pnode_to_socket[i] = SOCK_EMPTY; /* fill in pnode/node/addr conversion list values */ - pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n"); + pr_info("UV: GAM Building socket/pnode conversion tables\n"); for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { if (gre->type == UV_GAM_RANGE_TYPE_HOLE) continue; @@ -1295,20 +1294,18 @@ static void __init build_socket_tables(void) if (_socket_to_pnode[i] != SOCK_EMPTY) continue; /* duplicate */ _socket_to_pnode[i] = gre->pnode; - _socket_to_node[i] = gre->pxm; i = gre->pnode - minpnode; _pnode_to_socket[i] = gre->sockid; pr_info( - "UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n", + "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n", gre->sockid, gre->type, gre->nasid, _socket_to_pnode[gre->sockid - minsock], - _socket_to_node[gre->sockid - minsock], _pnode_to_socket[gre->pnode - minpnode]); } - /* check socket -> node values */ + /* Set socket -> node values */ lnid = -1; for_each_present_cpu(cpu) { int nid = cpu_to_node(cpu); @@ -1319,14 +1316,9 @@ static void __init 
build_socket_tables(void) lnid = nid; apicid = per_cpu(x86_cpu_to_apicid, cpu); sockid = apicid >> uv_cpuid.socketid_shift; - i = sockid - minsock; - - if (nid != _socket_to_node[i]) { - pr_warn( - "UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n", - i, sockid, gre->type, _socket_to_node[i], nid); - _socket_to_node[i] = nid; - } + _socket_to_node[sockid - minsock] = nid; + pr_info("UV: sid:%02x: apicid:%04x node:%2d\n", + sockid, apicid, nid); } /* Setup physical blade to pnode translation from GAM Range Table */ -- cgit v0.10.2 From 5a52e8f822374bebc702bb2688ed8b5515bbb55b Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 1 Aug 2016 13:40:53 -0500 Subject: x86/platform/UV: Fix kernel panic running RHEL kdump kernel on UV systems The latest UV kernel support panics when RHEL7 kexec's the kdump kernel to make a dumpfile. This patch fixes the problem by turning off all UV support if NUMA is off. Tested-by: Frank Ramsay Tested-by: John Estabrook Signed-off-by: Mike Travis Reviewed-by: Dimitri Sivanich Reviewed-by: Nathan Zimmer Cc: Alex Thorlton Cc: Andrew Banman Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160801184050.577755634@asylum.americas.sgi.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 6aa0545..cb0673c 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -223,6 +223,11 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) if (strncmp(oem_id, "SGI", 3) != 0) return 0; + if (numa_off) { + pr_err("UV: NUMA is off, disabling UV support\n"); + return 0; + } + /* Setup early hub type field in uv_hub_info for Node 0 */ uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; -- cgit v0.10.2 From 5e44258d168b2bdee51d9e2e1f1f4726ff9775cd Mon Sep 17 00:00:00 2001 From: Valdis Kletnieks Date: Sun, 31 Jul 2016 23:24:50 -0400 Subject: x86/build: Reduce the W=1 warnings noise when compiling x86 syscall tables Building an X86_64 kernel with W=1 throws a total of 9,948 lines of warnings of this form for both 32-bit and 64-bit syscall tables. Given that the entire rest of the build for my config only generates 8,375 lines of output, this is a big reduction in the warnings generated. The warnings follow this pattern: ./arch/x86/include/generated/asm/syscalls_32.h:885:21: warning: initialized field overwritten [-Woverride-init] __SYSCALL_I386(379, compat_sys_pwritev2, ) ^ arch/x86/entry/syscall_32.c:13:46: note: in definition of macro '__SYSCALL_I386' #define __SYSCALL_I386(nr, sym, qual) [nr] = sym, ^~~ ./arch/x86/include/generated/asm/syscalls_32.h:885:21: note: (near initialization for 'ia32_sys_call_table[379]') __SYSCALL_I386(379, compat_sys_pwritev2, ) ^ arch/x86/entry/syscall_32.c:13:46: note: in definition of macro '__SYSCALL_I386' #define __SYSCALL_I386(nr, sym, qual) [nr] = sym, Since we intentionally build the syscall tables this way, ignore that one warning in the two files. Signed-off-by: Valdis Kletnieks Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/7464.1470021890@turing-police.cc.vt.edu Signed-off-by: Ingo Molnar diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index fe91c25..77f28ce 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -5,6 +5,8 @@ OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y +CFLAGS_syscall_64.o += -Wno-override-init +CFLAGS_syscall_32.o += -Wno-override-init obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o obj-y += common.o -- cgit v0.10.2 From b79daf85899215d5ede3641806db2e2a77b776b4 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 27 Jul 2016 16:20:40 -0700 Subject: x86/mm/pkeys: Fix compact mode by removing protection keys' XSAVE buffer manipulation The Memory Protection Keys "rights register" (PKRU) is XSAVE-managed, and is saved/restored along with the FPU state. When kernel code accesses FPU regsisters, it does a delicate dance with preempt. Otherwise, the context switching code can get confused as to whether the most up-to-date state is in the registers themselves or in the XSAVE buffer. But, PKRU is not a normal FPU register. Using it does not generate the normal device-not-available (#NM) exceptions which means we can not manage it lazily, and the kernel completley disallows using lazy mode when it is enabled. The dance with preempt *only* occurs when managing the FPU lazily. Since we never manage PKRU lazily, we do not have to do the dance with preempt; we can access it directly. Doing it this way saves a ton of complicated code (and is faster too). Further, the XSAVES reenabling failed to patch a bit of code in fpu__xfeature_set_state() the checked for compacted buffers. That check caused fpu__xfeature_set_state() to silently refuse to work when the kernel is using compacted XSAVE buffers. This broke execute-only and future pkey_mprotect() support when using compact XSAVE buffers. But, removing fpu__xfeature_set_state() gets rid of this issue, in addition to the nice cleanup and speedup. This fixes the same thing as a fix that Sai posted: https://lkml.org/lkml/2016/7/25/637 The fix that he posted is a much more obviously correct, but I think we should just do this instead. Reported-by: Sai Praneeth Prakhya Signed-off-by: Dave Hansen Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Dave Hansen Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Quentin Casasnovas Cc: Ravi Shankar Cc: Thomas Gleixner Cc: Yu-Cheng Yu Link: http://lkml.kernel.org/r/20160727232040.7D060DAD@viggo.jf.intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 680049a..01567aa 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -866,105 +866,17 @@ const void *get_xsave_field_ptr(int xsave_state) return get_xsave_addr(&fpu->state.xsave, xsave_state); } - -/* - * Set xfeatures (aka XSTATE_BV) bit for a feature that we want - * to take out of its "init state". This will ensure that an - * XRSTOR actually restores the state. - */ -static void fpu__xfeature_set_non_init(struct xregs_state *xsave, - int xstate_feature_mask) -{ - xsave->header.xfeatures |= xstate_feature_mask; -} - -/* - * This function is safe to call whether the FPU is in use or not. - * - * Note that this only works on the current task. - * - * Inputs: - * @xsave_state: state which is defined in xsave.h (e.g. 
XFEATURE_MASK_FP, - * XFEATURE_MASK_SSE, etc...) - * @xsave_state_ptr: a pointer to a copy of the state that you would - * like written in to the current task's FPU xsave state. This pointer - * must not be located in the current tasks's xsave area. - * Output: - * address of the state in the xsave area or NULL if the state - * is not present or is in its 'init state'. - */ -static void fpu__xfeature_set_state(int xstate_feature_mask, - void *xstate_feature_src, size_t len) -{ - struct xregs_state *xsave = ¤t->thread.fpu.state.xsave; - struct fpu *fpu = ¤t->thread.fpu; - void *dst; - - if (!boot_cpu_has(X86_FEATURE_XSAVE)) { - WARN_ONCE(1, "%s() attempted with no xsave support", __func__); - return; - } - - /* - * Tell the FPU code that we need the FPU state to be in - * 'fpu' (not in the registers), and that we need it to - * be stable while we write to it. - */ - fpu__current_fpstate_write_begin(); - - /* - * This method *WILL* *NOT* work for compact-format - * buffers. If the 'xstate_feature_mask' is unset in - * xcomp_bv then we may need to move other feature state - * "up" in the buffer. - */ - if (xsave->header.xcomp_bv & xstate_feature_mask) { - WARN_ON_ONCE(1); - goto out; - } - - /* find the location in the xsave buffer of the desired state */ - dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask); - - /* - * Make sure that the pointer being passed in did not - * come from the xsave buffer itself. - */ - WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself"); - - /* put the caller-provided data in the location */ - memcpy(dst, xstate_feature_src, len); - - /* - * Mark the xfeature so that the CPU knows there is state - * in the buffer now. - */ - fpu__xfeature_set_non_init(xsave, xstate_feature_mask); -out: - /* - * We are done writing to the 'fpu'. Reenable preeption - * and (possibly) move the fpstate back in to the fpregs. - */ - fpu__current_fpstate_write_end(); -} - #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) /* - * This will go out and modify the XSAVE buffer so that PKRU is - * set to a particular state for access to 'pkey'. - * - * PKRU state does affect kernel access to user memory. We do - * not modfiy PKRU *itself* here, only the XSAVE state that will - * be restored in to PKRU when we return back to userspace. + * This will go out and modify PKRU register to set the access + * rights for @pkey to @init_val. */ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) { - struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; - struct pkru_state *old_pkru_state; - struct pkru_state new_pkru_state; + u32 old_pkru; int pkey_shift = (pkey * PKRU_BITS_PER_PKEY); u32 new_pkru_bits = 0; @@ -974,6 +886,15 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, */ if (!boot_cpu_has(X86_FEATURE_OSPKE)) return -EINVAL; + /* + * For most XSAVE components, this would be an arduous task: + * brining fpstate up to date with fpregs, updating fpstate, + * then re-populating fpregs. But, for components that are + * never lazily managed, we can just access the fpregs + * directly. PKRU is never managed lazily, so we can just + * manipulate it directly. Make sure it stays that way. 
+ */ + WARN_ON_ONCE(!use_eager_fpu()); /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) @@ -984,37 +905,12 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, /* Shift the bits in to the correct place in PKRU for pkey: */ new_pkru_bits <<= pkey_shift; - /* Locate old copy of the state in the xsave buffer: */ - old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU); - - /* - * When state is not in the buffer, it is in the init - * state, set it manually. Otherwise, copy out the old - * state. - */ - if (!old_pkru_state) - new_pkru_state.pkru = 0; - else - new_pkru_state.pkru = old_pkru_state->pkru; - - /* Mask off any old bits in place: */ - new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); - - /* Set the newly-requested bits: */ - new_pkru_state.pkru |= new_pkru_bits; - - /* - * We could theoretically live without zeroing pkru.pad. - * The current XSAVE feature state definition says that - * only bytes 0->3 are used. But we do not want to - * chance leaking kernel stack out to userspace in case a - * memcpy() of the whole xsave buffer was done. - * - * They're in the same cacheline anyway. - */ - new_pkru_state.pad = 0; + /* Get old PKRU and mask off any old bits in place: */ + old_pkru = read_pkru(); + old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); - fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state)); + /* Write old part along with new part: */ + write_pkru(old_pkru | new_pkru_bits); return 0; } -- cgit v0.10.2 From b06bc7ec4da26dccda040a65896ea22ee6638a7a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Jul 2016 09:10:31 +0100 Subject: drm/i915: Flush GT idle status upon reset Upon resetting the GPU, we force the engines to be idle by clearing their request lists. However, I neglected to clear the GT active status and so the next request following the reset was not marking the device as busy again. (We had to wait until any outstanding retire worker finally ran and cleared the active status.) Fixes: 67d97da34917 ("drm/i915: Only start retire worker when idle") Testcase: igt/pm_rps/reset Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1468397438-21226-1-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen (cherry picked from commit b913b33c43db849778f044d4b9e74b167898a9bc) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1168150..27ef10e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3169,6 +3169,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) } intel_ring_init_seqno(engine, engine->last_submitted_seqno); + + engine->i915->gt.active_engines &= ~intel_engine_flag(engine); } void i915_gem_reset(struct drm_device *dev) @@ -3186,6 +3188,7 @@ void i915_gem_reset(struct drm_device *dev) for_each_engine(engine, dev_priv) i915_gem_reset_engine_cleanup(engine); + mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); i915_gem_context_reset(dev); -- cgit v0.10.2 From c2a4c5b75a717db8bbe1c2fad6473063a7d2f930 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Wed, 3 Aug 2016 13:39:42 -0500 Subject: drm/amdgpu: Fix memory trashing if UVD ring test fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fence_put was called on an uninitialized variable. 
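The general pattern behind this fix (a standalone sketch with invented names, not the amdgpu code): a goto-style cleanup label must only release resources that every path reaching it has initialized. Either start the pointer at NULL so an early release is harmless, or release before the label as the patch does:

  /* cleanup_label.c: invented example -- build with "gcc -Wall cleanup_label.c". */
  #include <stdio.h>
  #include <stdlib.h>

  struct fence { int seq; };              /* stand-in for the real fence type */

  static void fence_put(struct fence *f)  /* stand-in for the real release helper */
  {
          free(f);
  }

  static int ring_test(int fail_early)
  {
          struct fence *fence = NULL;     /* NULL first, so no path can release garbage */
          int r = 0;

          if (fail_early) {
                  r = -1;
                  goto error;             /* fence was never created on this path */
          }

          fence = malloc(sizeof(*fence));
          if (!fence)
                  return -1;
          fence->seq = 1;

          /* ... use or wait on the fence ... */

          fence_put(fence);               /* release before the label, as the patch does */
  error:
          return r;
  }

  int main(void)
  {
          printf("early failure: %d, success: %d\n", ring_test(1), ring_test(0));
          return 0;
  }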
Reviewed-by: Christian König Signed-off-by: Jay Cornwall Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b11f4e8..4aa993d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; } -error: fence_put(fence); + +error: return r; } -- cgit v0.10.2 From aec742e8e1cf05114530bdef8051238b6dbb4052 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 10 Aug 2016 08:45:11 -0700 Subject: get_maintainer: Don't check if STDIN exists in a VCS repository If get_maintainer is not given any filename arguments on the command line, the standard input is read for a patch. But checking if a VCS has a file named &STDIN is not a good idea and fails. Verify the nominal input file is not &STDIN before checking the VCS. Fixes: 4cad35a7ca69 ("get_maintainer.pl: reduce need for command-line option -f") Reported-by: Christopher Covington Signed-off-by: Joe Perches Signed-off-by: Linus Torvalds diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 122fcda..49a00d5 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl @@ -432,7 +432,7 @@ foreach my $file (@ARGV) { die "$P: file '${file}' not found\n"; } } - if ($from_filename || vcs_file_exists($file)) { + if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) { $file =~ s/^\Q${cur_path}\E//; #strip any absolute path $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree push(@files, $file); -- cgit v0.10.2 From 7de249964f5578e67b99699c5f0b405738d820a2 Mon Sep 17 00:00:00 2001 From: Dave Weinstein Date: Thu, 28 Jul 2016 11:55:41 -0700 Subject: arm: oabi compat: add missing access checks Add access checks to sys_oabi_epoll_wait() and sys_oabi_semtimedop(). This fixes CVE-2016-3857, a local privilege escalation under CONFIG_OABI_COMPAT. Cc: stable@vger.kernel.org Reported-by: Chiachih Wu Reviewed-by: Kees Cook Reviewed-by: Nicolas Pitre Signed-off-by: Dave Weinstein Signed-off-by: Linus Torvalds diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 087acb5..5f221ac 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, mm_segment_t fs; long ret, err, i; - if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) + if (maxevents <= 0 || + maxevents > (INT_MAX/sizeof(*kbuf)) || + maxevents > (INT_MAX/sizeof(*events))) return -EINVAL; + if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) + return -EFAULT; kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); if (!kbuf) return -ENOMEM; @@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid, if (nsops < 1 || nsops > SEMOPM) return -EINVAL; + if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) + return -EFAULT; sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); if (!sops) return -ENOMEM; -- cgit v0.10.2 From 549fba3a615a5dc6219bb41b3ee3951173fcf1e6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Jun 2016 16:21:19 +0200 Subject: ARM: don't include removed directories Three platforms used to have header files in include/mach that are now all gone, but the removed directories are still being included, which leads to -Wmissing-include-dirs warnings. This removes the extra -I flags. 
Signed-off-by: Arnd Bergmann diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile index e53c6cf..6c6497e 100644 --- a/arch/arm/mach-mvebu/Makefile +++ b/arch/arm/mach-mvebu/Makefile @@ -1,5 +1,4 @@ -ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ - -I$(srctree)/arch/arm/plat-orion/include +ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include AFLAGS_coherency_ll.o := -Wa,-march=armv7-a CFLAGS_pmsu.o := -march=armv7-a diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile index dae8d86..4048821 100644 --- a/arch/arm/mach-realview/Makefile +++ b/arch/arm/mach-realview/Makefile @@ -1,8 +1,7 @@ # # Makefile for the linux kernel. # -ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ - -I$(srctree)/arch/arm/plat-versatile/include +ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include obj-y := core.o obj-$(CONFIG_REALVIEW_DT) += realview-dt.o diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile index 72b9e96..fa7fb71 100644 --- a/arch/arm/mach-s5pv210/Makefile +++ b/arch/arm/mach-s5pv210/Makefile @@ -5,7 +5,7 @@ # # Licensed under GPLv2 -ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include +ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include # Core -- cgit v0.10.2 From de8a06f674a6f9d781e5087e9217319c7b831821 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 9 Jun 2016 09:50:28 +0200 Subject: ARM: hide mach-*/ include for ARM_SINGLE_ARMV7M The machine specific header files are exported for traditional platforms, but not for the ones that use ARCH_MULTIPLATFORM, as they could conflict with one another. In case of ARM_SINGLE_ARMV7M, we end up also exporting them, but that appears to be a mistake, and we should treat it the same way as ARCH_MULTIPLATFORM here. 'make W=1' warns about this because it passes -Wmissing-includes to gcc and the directories are not actually present. Signed-off-by: Arnd Bergmann diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 56ea5c60b..61f6ccc 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -260,12 +260,14 @@ machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y)) platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) +ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y) ifeq ($(KBUILD_SRC),) KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) else KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) endif endif +endif export TEXT_OFFSET GZFLAGS MMUEXT -- cgit v0.10.2 From af9d238c6a1d5c131104bb011cc4c55a6207b9a2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 6 Jul 2016 14:16:24 +0200 Subject: ARM: oxnas: select reset controller framework For unknown reasons, we have to enable three symbols for a platform to use a reset controller driver, otherwise we get a Kconfig warning: warning: (MACH_OX810SE) selects RESET_OXNAS which has unmet direct dependencies (RESET_CONTROLLER) This selects the other two symbols for oxnas. 
Signed-off-by: Arnd Bergmann Acked-by: Neil Armstrong diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig index 567496b..29100be 100644 --- a/arch/arm/mach-oxnas/Kconfig +++ b/arch/arm/mach-oxnas/Kconfig @@ -11,11 +11,13 @@ if ARCH_OXNAS config MACH_OX810SE bool "Support OX810SE Based Products" + select ARCH_HAS_RESET_CONTROLLER select COMMON_CLK_OXNAS select CPU_ARM926T select MFD_SYSCON select OXNAS_RPS_TIMER select PINCTRL_OXNAS + select RESET_CONTROLLER select RESET_OXNAS select VERSATILE_FPGA_IRQ help -- cgit v0.10.2 From 83e484fcbe44e91fe8efb41b33ed3434806eeaac Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 10 Aug 2016 11:38:24 +0200 Subject: ARM: dts: add syscon compatible string for CP syscon This syscon needs to be looked up by flash protection, CLCD display output settings and other consumers. Signed-off-by: Linus Walleij Signed-off-by: Arnd Bergmann diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts index d43f15b..79430fb 100644 --- a/arch/arm/boot/dts/integratorcp.dts +++ b/arch/arm/boot/dts/integratorcp.dts @@ -94,7 +94,7 @@ }; syscon { - compatible = "arm,integrator-cp-syscon"; + compatible = "arm,integrator-cp-syscon", "syscon"; reg = <0xcb000000 0x100>; }; -- cgit v0.10.2 From f2b54191f7bef2925db39eceb84e7e19ff6d8f56 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 10 Aug 2016 11:38:12 +0200 Subject: ARM: dts: add syscon compatible string for AP syscon This syscon needs to be looked up by clocks, flash protection and other consumers. Signed-off-by: Linus Walleij Signed-off-by: Arnd Bergmann diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts index cf06e32..4b34b54 100644 --- a/arch/arm/boot/dts/integratorap.dts +++ b/arch/arm/boot/dts/integratorap.dts @@ -42,7 +42,7 @@ }; syscon { - compatible = "arm,integrator-ap-syscon"; + compatible = "arm,integrator-ap-syscon", "syscon"; reg = <0x11000000 0x100>; interrupt-parent = <&pic>; /* These are the logical module IRQs */ -- cgit v0.10.2 From b5c86b7496d74f6e454bcab5166efa023e1f0459 Mon Sep 17 00:00:00 2001 From: Ralf Ramsauer Date: Mon, 18 Jul 2016 11:46:48 +0200 Subject: ARM: tegra: fix erroneous address in dts c90bb7b enabled the high speed UARTs of the Jetson TK1. Due to a merge quirk, wrong addresses were introduced. Fix it and use the correct addresses. Thierry let me know, that there is another patch (b5896f67ab3c in linux-next) in preparation which removes all the '0,' prefixes of unit addresses on Tegra124 and is planned to go upstream in 4.8, so this patch will get reverted then. But for the moment, this patch is necessary to fix current misbehaviour. 
Fixes: c90bb7b9b9 ("ARM: tegra: Add high speed UARTs to Jetson TK1 device tree") Signed-off-by: Ralf Ramsauer Acked-by: Thierry Reding Cc: stable@vger.kernel.org # v4.7 Cc: linux-tegra@vger.kernel.org Signed-off-by: Arnd Bergmann diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index e52b824..6403e0d 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts @@ -1382,7 +1382,7 @@ * Pin 41: BR_UART1_TXD * Pin 44: BR_UART1_RXD */ - serial@70006000 { + serial@0,70006000 { compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; status = "okay"; }; @@ -1394,7 +1394,7 @@ * Pin 71: UART2_CTS_L * Pin 74: UART2_RTS_L */ - serial@70006040 { + serial@0,70006040 { compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; status = "okay"; }; -- cgit v0.10.2 From a20303725ec31ea0fcf498f1885b1d4245a4ee56 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 10 Aug 2016 14:02:17 +0200 Subject: ARM: dts: realview: Fix PBX-A9 cache description Clearly QEMU is very permissive in how its PL310 model may be set up, but the real hardware turns out to be far more particular about things actually being correct. Fix up the DT description so that the real thing actually boots: - The arm,data-latency and arm,tag-latency properties need 3 cells to be valid, otherwise we end up retaining the default 8-cycle latencies which leads pretty quickly to lockup. - The arm,dirty-latency property is only relevant to L210/L220, so get rid of it. - The cache geometry override also leads to lockup and/or general misbehaviour. Irritatingly, the manual doesn't state the actual PL310 configuration, but based on the boardfile code and poking registers from the Boot Monitor, it would seem to be 8 sets of 16KB ways. With that, we can successfully boot to enjoy the fun of mismatched FPUs... Cc: stable@vger.kernel.org Signed-off-by: Robin Murphy Tested-by: Mark Rutland Signed-off-by: Linus Walleij Signed-off-by: Arnd Bergmann diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts index db808f9..90d00b4 100644 --- a/arch/arm/boot/dts/arm-realview-pbx-a9.dts +++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts @@ -70,13 +70,12 @@ * associativity as these may be erroneously set * up by boot loader(s). */ - cache-size = <1048576>; // 1MB - cache-sets = <4096>; + cache-size = <131072>; // 128KB + cache-sets = <512>; cache-line-size = <32>; arm,parity-disable; - arm,tag-latency = <1>; - arm,data-latency = <1 1>; - arm,dirty-latency = <1>; + arm,tag-latency = <1 1 1>; + arm,data-latency = <1 1 1>; }; scu: scu@1f000000 { -- cgit v0.10.2 From 5a3f75a4b2dab54b6b0d645b500583abfc27b260 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 3 Aug 2016 15:29:33 +0100 Subject: arm64: Kconfig: select ALPINE_MSI only if PCI is selected Even when PCI is disabled, ARCH_ALPINE selects ALPINE_MSI triggerring the following config warning: warning: (ARCH_ALPINE) selects ALPINE_MSI which has unmet direct dependencies (PCI) This patch makes selection of ALPINE_MSI conditional on PCI. 
Cc: Arnd Bergmann Acked-by: Antoine Tenart Signed-off-by: Sudeep Holla Signed-off-by: Arnd Bergmann diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index bb2616b..fd3ee3a 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -8,7 +8,7 @@ config ARCH_SUNXI config ARCH_ALPINE bool "Annapurna Labs Alpine platform" - select ALPINE_MSI + select ALPINE_MSI if PCI help This enables support for the Annapurna Labs Alpine Soc family. -- cgit v0.10.2 From f9db43bc296833451cbe4b03eb5e1f633ad1f787 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 3 Aug 2016 15:29:34 +0100 Subject: arm64: Kconfig: select HISILICON_IRQ_MBIGEN only if PCI is selected Even when PCI is disabled, ARCH_HISI selects HISILICON_IRQ_MBIGEN triggering the following config warning: warning: (ARM64 && HISILICON_IRQ_MBIGEN) selects ARM_GIC_V3_ITS which has unmet direct dependencies (PCI && PCI_MSI) This patch makes selection of HISILICON_IRQ_MBIGEN conditional on PCI. Cc: Ma Jun Cc: Arnd Bergmann Signed-off-by: Sudeep Holla Signed-off-by: Arnd Bergmann diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index fd3ee3a..be5d824 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -66,7 +66,7 @@ config ARCH_LG1K config ARCH_HISI bool "Hisilicon SoC Family" select ARM_TIMER_SP804 - select HISILICON_IRQ_MBIGEN + select HISILICON_IRQ_MBIGEN if PCI help This enables support for Hisilicon ARMv8 SoC family -- cgit v0.10.2 From fae82e59d2ff7d789878c9eb3a007ae7eb6641b2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 16 Jul 2016 18:42:36 +0100 Subject: drm/i915: Handle ENOSPC after failing to insert a mappable node Even after adding individual page support for GTT mmapping, we can still fail to find any space within the mappable region, and drm_mm_insert_node() will then report ENOSPC. We have to then handle this error by using the shmem access to the pages.
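The shape of that fallback (a generic, standalone sketch with invented names, not the i915 code): when the fast path can fail for more than one recoverable reason, the fallback check has to accept each of them:

  /* fallback_paths.c: invented example -- build with "gcc -Wall fallback_paths.c". */
  #include <errno.h>
  #include <stdio.h>

  /* Invented fast path: pretend it can fail with -EFAULT or -ENOSPC. */
  static int fast_write(int simulated_error)
  {
          return -simulated_error;
  }

  /* Invented slow path that always succeeds. */
  static int slow_write(void)
  {
          return 0;
  }

  static int do_write(int simulated_error)
  {
          int ret = fast_write(simulated_error);

          /* Both errors are recoverable by falling back; checking only for
           * -EFAULT would wrongly bubble -ENOSPC up to the caller. */
          if (ret == -EFAULT || ret == -ENOSPC)
                  ret = slow_write();
          return ret;
  }

  int main(void)
  {
          printf("%d %d %d\n", do_write(0), do_write(EFAULT), do_write(ENOSPC));
          return 0;
  }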
Cc: stable@vger.kernel.org Fixes: f8896f5d58e6 ("drm/i915/skl: Buffer translation improvements") Cc: David Weinehall Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1468328376-6380-2-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: David Weinehall (cherry picked from commit a7d8dbc07c8f0faaace983b1e4c6e9495dd0aa75) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce14fe0..5c06413 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1536,6 +1536,7 @@ enum skl_disp_power_wells { #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) /* Balance leg disable bits */ #define BALANCE_LEG_DISABLE_SHIFT 23 +#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port))) /* * Fence registers diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index dd1d6fe..75354cd 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1379,14 +1379,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) TRANS_CLK_SEL_DISABLED); } -static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type) +static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, + enum port port, uint8_t iboost) { + u32 tmp; + + tmp = I915_READ(DISPIO_CR_TX_BMU_CR0); + tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); + if (iboost) + tmp |= iboost << BALANCE_LEG_SHIFT(port); + else + tmp |= BALANCE_LEG_DISABLE(port); + I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp); +} + +static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + enum port port = intel_dig_port->port; + int type = encoder->type; const struct ddi_buf_trans *ddi_translations; uint8_t iboost; uint8_t dp_iboost, hdmi_iboost; int n_entries; - u32 reg; /* VBT may override standard boost values */ dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; @@ -1428,16 +1444,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, return; } - reg = I915_READ(DISPIO_CR_TX_BMU_CR0); - reg &= ~BALANCE_LEG_MASK(port); - reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port)); - - if (iboost) - reg |= iboost << BALANCE_LEG_SHIFT(port); - else - reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port); + _skl_ddi_set_iboost(dev_priv, port, iboost); - I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg); + if (port == PORT_A && intel_dig_port->max_lanes == 4) + _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); } static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, @@ -1568,7 +1578,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) level = translate_signal_level(signal_levels); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - skl_ddi_set_iboost(dev_priv, level, port, encoder->type); + skl_ddi_set_iboost(encoder, level); else if (IS_BROXTON(dev_priv)) bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); -- cgit v0.10.2 From 7ff9a55614712adf13ce7990565be0263e620f5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 12 Jul 2016 15:59:30 +0300 Subject: drm/i915: Program iboost settings for HDMI/DVI on SKL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we fail to program the iboost stuff for HDMI/DVI. Let's remedy that. 
Cc: stable@vger.kernel.org Fixes: f8896f5d58e6 ("drm/i915/skl: Buffer translation improvements") Cc: David Weinehall Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1468328376-6380-4-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: David Weinehall (cherry picked from commit 8d8bb85eb7d859aa9bbe36e588690a1d22af7608) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 75354cd..415c061 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) } } +static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) +{ + int n_hdmi_entries; + int hdmi_level; + int hdmi_default_entry; + + hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + + if (IS_BROXTON(dev_priv)) + return hdmi_level; + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); + hdmi_default_entry = 8; + } else if (IS_BROADWELL(dev_priv)) { + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + hdmi_default_entry = 7; + } else if (IS_HASWELL(dev_priv)) { + n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); + hdmi_default_entry = 6; + } else { + WARN(1, "ddi translation table missing\n"); + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + hdmi_default_entry = 7; + } + + /* Choose a good default if VBT is badly populated */ + if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || + hdmi_level >= n_hdmi_entries) + hdmi_level = hdmi_default_entry; + + return hdmi_level; +} + /* * Starting with Haswell, DDI port buffers must be programmed with correct * values in advance. The buffer values are different for FDI and DP modes, @@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; - int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, + int i, n_hdmi_entries, n_dp_entries, n_edp_entries, size; int hdmi_level; enum port port; @@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) const struct ddi_buf_trans *ddi_translations; port = intel_ddi_get_encoder_port(encoder); - hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + hdmi_level = intel_ddi_hdmi_level(dev_priv, port); if (IS_BROXTON(dev_priv)) { if (encoder->type != INTEL_OUTPUT_HDMI) @@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) skl_get_buf_trans_edp(dev_priv, &n_edp_entries); ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); - hdmi_default_entry = 8; /* If we're boosting the current, set bit 31 of trans1 */ if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || dev_priv->vbt.ddi_port_info[port].dp_boost_level) @@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_default_entry = 7; } else if (IS_HASWELL(dev_priv)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; @@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) ddi_translations_hdmi = hsw_ddi_translations_hdmi; n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); - hdmi_default_entry = 6; } else { 
WARN(1, "ddi translation table missing\n"); ddi_translations_edp = bdw_ddi_translations_dp; @@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_default_entry = 7; } switch (encoder->type) { @@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) if (encoder->type != INTEL_OUTPUT_HDMI) return; - /* Choose a good default if VBT is badly populated */ - if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || - hdmi_level >= n_hdmi_entries) - hdmi_level = hdmi_default_entry; - /* Entry 9 is for HDMI: */ I915_WRITE(DDI_BUF_TRANS_LO(port, i), ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); @@ -1647,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_stop_link_train(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + int level = intel_ddi_hdmi_level(dev_priv, port); + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + skl_ddi_set_iboost(intel_encoder, level); intel_hdmi->set_infoframes(encoder, crtc->config->has_hdmi_sink, -- cgit v0.10.2 From d8b6161f7294b63180da443c9ff48c7efce182eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 2 Aug 2016 14:07:33 +0300 Subject: drm/i915: Clean up the extra RPM ref on CHV with i915.enable_rc6=0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the CHV early bail out from intel_cleanup_gt_powersave() so that we'll clean up the extra RPM reference held due to i915.enable_rc6=0. Cc: Imre Deak Fixes: b268c699aca5 ("drm/i915: refactor RPM disabling due to RC6 being disabled") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1470136053-23276-1-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak (cherry picked from commit 8dac1e1f2068321fb4b7062d3c5408971f7a7e35) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f4f3fcc..1ff5c63 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6573,9 +6573,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - if (IS_CHERRYVIEW(dev_priv)) - return; - else if (IS_VALLEYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) -- cgit v0.10.2 From 0a491b96aa59a7232f6c1a81414aa57fb8de8594 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Aug 2016 08:43:53 +0100 Subject: drm/i915/fbc: FBC causes display flicker when VT-d is enabled on Skylake MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Erratum SKL075: Display Flicker May Occur When Both VT-d And FBC Are Enabled "Display flickering may occur when both FBC (Frame Buffer Compression) and VT - d (Intel® Virtualization Technology for Directed I/O) are enabled and in use by the display controller." Ville found the w/a name in the database: WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt and also dug out that it affects Broxton. v2: Log when the quirk is applied. 
v3: Ensure i915.enable_fbc is false when !HAS_FBC() v4: Fix function name after rebase v5: Add Broxton to the workaround Note for backporting to stable, we need to add #define mkwrite_device_info(ptr) \ ((struct intel_device_info *)INTEL_INFO(ptr)) Signed-off-by: Chris Wilson Cc: Paulo Zanoni Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Cc: stable@vger.kernel.org Link: http://patchwork.freedesktop.org/patch/msgid/1470296633-20388-1-git-send-email-chris@chris-wilson.co.uk (cherry picked from commit 36dbc4d76918d7557b686f807106dcc799174b12) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 6a7ad3e..3836a1c 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (i915.enable_fbc >= 0) return !!i915.enable_fbc; + if (!HAS_FBC(dev_priv)) + return 0; + if (IS_BROADWELL(dev_priv)) return 1; return 0; } +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ +#ifdef CONFIG_INTEL_IOMMU + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_iommu_gfx_mapped && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } +#endif + + return false; +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->active = false; fbc->work.scheduled = false; + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->has_fbc = false; + i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); -- cgit v0.10.2 From 2ca17b87e85ffada1e3da941d5c1c9ee4f56ca9f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Aug 2016 09:09:53 +0100 Subject: drm/i915: Add missing rpm wakelock to GGTT pread Joonas spotted a discrepancy between the pwrite and pread ioctls, in that pwrite takes the rpm wakelock around its GGTT access, The wakelock is required in order for the GTT to function. In disregard for the current convention, we take the rpm wakelock around the access itself rather than around the struct_mutex as the nesting is not strictly required and such ordering will one day be fixed by explicitly noting the barrier dependencies between the GGTT and rpm. 
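Illustrative aside, not part of the patch: the change amounts to bracketing only the fallback GGTT access with a runtime-PM reference instead of holding one across the whole struct_mutex section. A trimmed, annotated view of the hunk that follows, with comments added here for illustration:

	/* pread fell back to the GGTT path: the device must be awake for
	 * any GGTT access, so take the wakeref around the access itself... */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		/* ...and drop it right after; ordering it outside struct_mutex
		 * is not strictly required, as the message above explains. */
		intel_runtime_pm_put(to_i915(dev));
	}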
Fixes: b50a53715f09 ("drm/i915: Support for pread/pwrite ...") Reported-by: Joonas Lahtinen Signed-off-by: Chris Wilson Cc: Ankitprasad Sharma Cc: Tvrtko Ursulin Cc: Joonas Lahtinen Cc: drm-intel-fixes@lists.freedesktop.org Link: http://patchwork.freedesktop.org/patch/msgid/1470298193-21765-1-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen (cherry picked from commit 1dd5b6f2020389e75bb3d269c038497f065e68c9) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index aceaad0..a77ce99 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ret = i915_gem_shmem_pread(dev, obj, args, file); /* pread for non shmem backed objects */ - if (ret == -EFAULT || ret == -ENODEV) + if (ret == -EFAULT || ret == -ENODEV) { + intel_runtime_pm_get(to_i915(dev)); ret = i915_gem_gtt_pread(dev, obj, args->size, args->offset, args->data_ptr); + intel_runtime_pm_put(to_i915(dev)); + } out: drm_gem_object_unreference(&obj->base); -- cgit v0.10.2 From 3cffb0a44750726cdc1cc07399efe3cbb45e028b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Aug 2016 17:09:00 +0100 Subject: drm/i915: Acquire audio powerwell for HD-Audio registers On Haswell/Broadwell, the HD-Audio block is inside the HDMI/display power well and so the sna-hda audio codec acquires the display power well while it is operational. However, Skylake separates the powerwells again, but yet we still need the audio powerwell to setup the registers. (But then the hardware uses those registers even while powered off???) Acquiring the powerwell around setting the chicken bits when setting up the audio channel does at least silence the WARNs from touching our registers whilst unpowered. We silence our own test cases, but maybe there is a latent bug in using the audio channel? v2: Grab both rpm wakelock and audio wakelock Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=96214 Fixes: 03b135cebc47 "ALSA: hda - remove dependency on i915 power well for SKL") Signed-off-by: Chris Wilson Cc: Libin Yang Cc: Takashi Iwai Cc: Marius Vlad Tested-by: Hans de Goede Cc: stable@vger.kernel.org Link: http://patchwork.freedesktop.org/patch/msgid/1470240540-29004-1-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Daniel Vetter (cherry picked from commit d838a110f0b310d408ebe6b5a97e36ec27555ebf) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 6700a7b..d32f586 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) return; + i915_audio_component_get_power(dev); + /* * Enable/disable generating the codec wake signal, overriding the * internal logic to generate the codec wake to controller. @@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, I915_WRITE(HSW_AUD_CHICKENBIT, tmp); usleep_range(1000, 1500); } + + i915_audio_component_put_power(dev); } /* Get CDCLK in kHz */ @@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, !IS_HASWELL(dev_priv)) return 0; + i915_audio_component_get_power(dev); mutex_lock(&dev_priv->av_mutex); /* 1. 
get the pipe */ intel_encoder = dev_priv->dig_port_map[port]; @@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, unlock: mutex_unlock(&dev_priv->av_mutex); + i915_audio_component_put_power(dev); return err; } -- cgit v0.10.2 From 58e311b09c319183254d9220c50a533e7157c9ab Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Thu, 4 Aug 2016 14:08:00 -0700 Subject: drm/i915/gen9: Give one extra block per line for SKL plane WM calculations The bspec was updated a couple weeks ago to add an extra block per line to plane watermark calculations for linear pixel formats. Bspec update 115327 description: "Gen9+ - Updated the plane blocks per line calculation for linear cases. Adds +1 for all linear cases to handle the non-block aligned stride cases." Cc: Lyude Cc: drm-intel-fixes@lists.freedesktop.org Signed-off-by: Matt Roper Link: http://patchwork.freedesktop.org/patch/msgid/1470344880-27394-1-git-send-email-matthew.d.roper@intel.com Reviewed-by: Lyude (cherry picked from commit 055c3ff69d440928964228455ec29b071258d5fa) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1ff5c63..3c7b382 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3344,6 +3344,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, plane_bytes_per_line *= 4; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); plane_blocks_per_line /= 4; + } else if (tiling == DRM_FORMAT_MOD_NONE) { + plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; } else { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); } -- cgit v0.10.2 From 85bf59d188721dca37bc8276457e68351213f38f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 2 Aug 2016 15:21:57 +0300 Subject: drm/i915: Fix iboost setting for SKL Y/U DP DDI buffer translation entry 2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The spec was recently fixed to have the correct iboost setting for the SKL Y/U DP DDI buffer translation table entry 2. Update our tables to match. 
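Illustrative aside, not part of the patch: a standalone sketch of what the table change in the hunks below means for level 2. The three-field entry layout and the i_boost field name are assumptions made for this sketch; the selection logic (a VBT-provided boost level wins, otherwise the per-entry table value is used) follows the skl_ddi_set_iboost() description earlier in this series.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of one DDI buffer translation entry; the third field
 * carries the per-entry iboost value this patch corrects for entry 2. */
struct ddi_buf_trans {
	uint32_t trans1;
	uint32_t trans2;
	uint8_t  i_boost;
};

/* First three SKL Y DP entries, with the corrected entry 2 from the diff. */
static const struct ddi_buf_trans skl_y_dp[] = {
	{ 0x00000018, 0x000000A2, 0x0 },
	{ 0x00005012, 0x00000088, 0x0 },
	{ 0x80007011, 0x000000CD, 0x3 },	/* was 0x0, now 0x3 */
};

/* VBT override wins, otherwise fall back to the table entry. */
static uint8_t pick_iboost(uint8_t vbt_boost, int level)
{
	return vbt_boost ? vbt_boost : skl_y_dp[level].i_boost;
}

int main(void)
{
	printf("level 2, no VBT override -> iboost 0x%x\n",
	       (unsigned int)pick_iboost(0, 2));
	return 0;
}

With no VBT override, an SKL Y part at level 2 now requests iboost 0x3 rather than 0x0 (0x1 in the U table).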
Cc: David Weinehall Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1470140517-13011-1-git-send-email-ville.syrjala@linux.intel.com Cc: stable@vger.kernel.org Reviewed-by: David Weinehall (cherry picked from commit 5ac9056753e79ac5ad1ccc3c99b311688e46e8c9) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 415c061..1a7efac 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { { 0x0000201B, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x0 }, + { 0x80007011, 0x000000CD, 0x1 }, { 0x80009010, 0x000000C0, 0x1 }, { 0x0000201B, 0x0000009D, 0x0 }, { 0x80005012, 0x000000C0, 0x1 }, @@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { { 0x00000018, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, { 0x80009010, 0x000000C0, 0x3 }, { 0x00000018, 0x0000009D, 0x0 }, { 0x80005012, 0x000000C0, 0x3 }, -- cgit v0.10.2 From bf74c93cd7696793eceadaf02b176500523e0b93 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Tue, 2 Aug 2016 09:36:53 +0100 Subject: drm/i915: fix WaInsertDummyPushConstPs As pointed out by Chris Harris, we are using the wrong WA name, it should in fact be WaToEnableHwFixForPushConstHWBug, also it should be applied from C0 onwards for both BXT and KBL. Fixes: 7b9005cd45f3 ("drm/i915: Add WaInsertDummyPushConstP for bxt and kbl") Cc: Chris Harris Cc: Mika Kuoppala Reported-by: Chris Harris Signed-off-by: Matthew Auld Reviewed-by: Arun Siluvery Signed-off-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1470127013-29653-1-git-send-email-matthew.auld@intel.com (cherry picked from commit 575e3ccbce4582395d57612b289178bad4af3be8) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cca7792..1d3161b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2)); - /* WaInsertDummyPushConstPs:bxt */ - if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) + /* WaToEnableHwFixForPushConstHWBug:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); - /* WaInsertDummyPushConstPs:kbl */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + /* WaToEnableHwFixForPushConstHWBug:kbl */ + if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); -- cgit v0.10.2 From 3871f42a57efcdc6a9da751a8cb6fa196c212289 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 5 Aug 2016 19:04:40 +0100 Subject: drm/i915: fix aliasing_ppgtt leak In i915_ggtt_cleanup_hw we need to remember to free aliasing_ppgtt. 
This fixes the following kmemleak message: unreferenced object 0xffff880213cca000 (size 8192): comm "modprobe", pid 1298, jiffies 4294745402 (age 703.930s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] kmemleak_alloc+0x4e/0xb0 [] kmem_cache_alloc_trace+0x142/0x1d0 [] i915_gem_init_ggtt+0x10f/0x210 [i915] [] i915_gem_init+0x5b/0xd0 [i915] [] i915_driver_load+0x97a/0x1460 [i915] [] i915_pci_probe+0x4f/0x70 [i915] [] local_pci_probe+0x45/0xa0 [] pci_device_probe+0x103/0x150 [] driver_probe_device+0x22c/0x440 [] __driver_attach+0xd1/0xf0 [] bus_for_each_dev+0x6c/0xc0 [] driver_attach+0x1e/0x20 [] bus_add_driver+0x1c3/0x280 [] driver_register+0x60/0xe0 [] __pci_register_driver+0x4c/0x50 [] 0xffffffffa013605b Signed-off-by: Matthew Auld Reviewed-by: Chris Wilson Fixes: b18b6bde300e ("drm/i915/bdw: Free PPGTT struct") Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1470420280-21417-1-git-send-email-matthew.auld@intel.com (cherry picked from commit cb7f27601c81a1e0454e9461e96f65b31fafbea0) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 10f1e32..7a30af7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; ppgtt->base.cleanup(&ppgtt->base); + kfree(ppgtt); } i915_gem_cleanup_stolen(dev); -- cgit v0.10.2 From dfa2997055659b4e706a85fba481050cc7e7ad82 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 5 Aug 2016 23:28:27 +0300 Subject: drm/i915: Fix modeset handling during gpu reset, v5. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function would call drm_modeset_lock_all, while the suspend/resume functions already have their own locking. Fix this by factoring out __intel_display_resume, and calling the atomic helpers for duplicating atomic state and disabling all crtc's during suspend. Changes since v1: - Deal with -EDEADLK right after lock_all and clean up calls to hw readout. - Always take all modeset locks so updates during gpu reset are blocked. Changes since v2: - Fix deadlock in intel_update_primary_planes. - Move WARN_ON(EDEADLK) to __intel_display_resume. - pctx -> ctx - only call __intel_display_resume on success in intel_display_resume. Changes since v3: - Rebase on top of dev_priv -> dev change. - Use drm_modeset_lock_all_ctx instead of drm_modeset_lock_all. Changes since v4 [by vsyrjala]: - Deal with skip_intermediate_wm - Update comment w.r.t. mode_config.mutex vs. ->detect() - Rebase due to INTEL_GEN() etc. 
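Illustrative aside, not part of the patch: condensed from the diff below, the flow around a GPU reset becomes roughly the sequence sketched here. Error handling, the gen2 and "reset doesn't touch the display" cases, and the mode_config.mutex locking are omitted; this is a sketch of the call order, not code that builds on its own.

	/* intel_prepare_reset(), simplified */
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;
		drm_modeset_backoff(ctx);		/* back off and retry on deadlock */
	}
	state = drm_atomic_helper_duplicate_state(dev, ctx);	/* snapshot current state */
	drm_atomic_helper_disable_all(dev, ctx);		/* gracefully disable crtcs */
	dev_priv->modeset_restore_state = state;

	/* intel_finish_reset(), simplified */
	intel_modeset_setup_hw_state(dev);	/* re-read hardware state */
	ret = drm_atomic_commit(state);		/* __intel_display_resume(): restore snapshot */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);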
Signed-off-by: Maarten Lankhorst Fixes: e2c8b8701e2d ("drm/i915: Use atomic helpers for suspend, v2.") Cc: stable@vger.kernel.org Tested-by: Ville Syrjälä Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1470428910-12125-2-git-send-email-ville.syrjala@linux.intel.com (cherry picked from commit 739748939974791b84629a8790527a16f76873a4) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 21f9390..20fe9d5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1854,6 +1854,7 @@ struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; struct drm_atomic_state *modeset_restore_state; + struct drm_modeset_acquire_ctx reset_ctx; struct list_head vm_list; /* Global list of all address spaces */ struct i915_ggtt ggtt; /* VM representing the global address space */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c457eed..fbb8574 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev) for_each_crtc(dev, crtc) { struct intel_plane *plane = to_intel_plane(crtc->primary); - struct intel_plane_state *plane_state; - - drm_modeset_lock_crtc(crtc, &plane->base); - plane_state = to_intel_plane_state(plane->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); if (plane_state->visible) plane->update_plane(&plane->base, to_intel_crtc_state(crtc->state), plane_state); + } +} + +static int +__intel_display_resume(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int i, ret; + + intel_modeset_setup_hw_state(dev); + i915_redisable_vga(dev); - drm_modeset_unlock_crtc(crtc); + if (!state) + return 0; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + /* + * Force recalculation even if we restore + * current state. With fast modeset this may not result + * in a modeset when the state is compatible. + */ + crtc_state->mode_changed = true; } + + /* ignore any reset values/BIOS leftovers in the WM registers */ + to_intel_atomic_state(state)->skip_intermediate_wm = true; + + ret = drm_atomic_commit(state); + + WARN_ON(ret == -EDEADLK); + return ret; } void intel_prepare_reset(struct drm_i915_private *dev_priv) { + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state; + int ret; + /* no reset support for gen2 */ if (IS_GEN2(dev_priv)) return; - /* reset doesn't touch the display */ + /* + * Need mode_config.mutex so that we don't + * trample ongoing ->detect() and whatnot. + */ + mutex_lock(&dev->mode_config.mutex); + drm_modeset_acquire_init(ctx, 0); + while (1) { + ret = drm_modeset_lock_all_ctx(dev, ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(ctx); + } + + /* reset doesn't touch the display, but flips might get nuked anyway, */ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(&dev_priv->drm); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. 
*/ - intel_display_suspend(&dev_priv->drm); + state = drm_atomic_helper_duplicate_state(dev, ctx); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + state = NULL; + DRM_ERROR("Duplicating state failed with %i\n", ret); + goto err; + } + + ret = drm_atomic_helper_disable_all(dev, ctx); + if (ret) { + DRM_ERROR("Suspending crtc's failed with %i\n", ret); + goto err; + } + + dev_priv->modeset_restore_state = state; + state->acquire_ctx = ctx; + return; + +err: + drm_atomic_state_free(state); } void intel_finish_reset(struct drm_i915_private *dev_priv) { + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state = dev_priv->modeset_restore_state; + int ret; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) if (IS_GEN2(dev_priv)) return; + dev_priv->modeset_restore_state = NULL; + /* reset doesn't touch the display */ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* @@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. */ - intel_update_primary_planes(&dev_priv->drm); - return; - } - - /* - * The display has been reset as well, - * so need a full re-initialization. - */ - intel_runtime_pm_disable_interrupts(dev_priv); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_update_primary_planes(dev); + } else { + /* + * The display has been reset as well, + * so need a full re-initialization. + */ + intel_runtime_pm_disable_interrupts(dev_priv); + intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(&dev_priv->drm); + intel_modeset_init_hw(dev); - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(&dev_priv->drm); + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); - intel_hpd_init(dev_priv); + intel_hpd_init(dev_priv); + } - drm_modeset_unlock_all(&dev_priv->drm); + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + mutex_unlock(&dev->mode_config.mutex); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -16174,9 +16249,10 @@ void intel_display_resume(struct drm_device *dev) struct drm_atomic_state *state = dev_priv->modeset_restore_state; struct drm_modeset_acquire_ctx ctx; int ret; - bool setup = false; dev_priv->modeset_restore_state = NULL; + if (state) + state->acquire_ctx = &ctx; /* * This is a cludge because with real atomic modeset mode_config.mutex @@ -16187,43 +16263,17 @@ void intel_display_resume(struct drm_device *dev) mutex_lock(&dev->mode_config.mutex); drm_modeset_acquire_init(&ctx, 0); -retry: - ret = drm_modeset_lock_all_ctx(dev, &ctx); - - if (ret == 0 && !setup) { - setup = true; - - intel_modeset_setup_hw_state(dev); - i915_redisable_vga(dev); - } - - if (ret == 0 && state) { - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i; - - state->acquire_ctx = &ctx; - - /* ignore any reset values/BIOS leftovers in the WM registers */ - to_intel_atomic_state(state)->skip_intermediate_wm = true; - - 
for_each_crtc_in_state(state, crtc, crtc_state, i) { - /* - * Force recalculation even if we restore - * current state. With fast modeset this may not result - * in a modeset when the state is compatible. - */ - crtc_state->mode_changed = true; - } - - ret = drm_atomic_commit(state); - } + while (1) { + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret != -EDEADLK) + break; - if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); - goto retry; } + if (!ret) + ret = __intel_display_resume(dev, state); + drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); -- cgit v0.10.2 From d8d378fa1a0c98ecb50ca52c9bf3bc14e25aa2d2 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 10 Aug 2016 15:59:09 -0700 Subject: tools/testing/nvdimm: fix SIGTERM vs hotplug crash The unit tests crash when hotplug races the previous probe. This race requires that the loading of the nfit_test module be terminated with SIGTERM, and the module to be unloaded while the ars scan is still running. In contrast to the normal nfit driver, the unit test calls acpi_nfit_init() twice to simulate hotplug, whereas the nominal case goes through the acpi_nfit_notify() event handler. The acpi_nfit_notify() path is careful to flush the previous region registration before servicing the hotplug event. The unit test was missing this guarantee. BUG: unable to handle kernel NULL pointer dereference at (null) IP: [] pwq_activate_delayed_work+0x47/0x170 [..] Call Trace: [] pwq_dec_nr_in_flight+0x66/0xa0 [] process_one_work+0x2d0/0x680 [] ? process_one_work+0x171/0x680 [] worker_thread+0x4e/0x480 [] ? process_one_work+0x680/0x680 [] ? process_one_work+0x680/0x680 [] kthread+0xf3/0x110 [] ret_from_fork+0x1f/0x40 [] ? kthread_create_on_node+0x230/0x230 Cc: Signed-off-by: Dan Williams diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 5404efa..dd48f42 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include +#include #include #include #include @@ -1474,6 +1475,7 @@ static int nfit_test_probe(struct platform_device *pdev) if (nfit_test->setup != nfit_test0_setup) return 0; + flush_work(&acpi_desc->work); nfit_test->setup_hotplug = 1; nfit_test->setup(nfit_test); -- cgit v0.10.2 From 73984137d32bb6e48646daea60ebe1457a01a061 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 10 Aug 2016 16:27:38 -0700 Subject: rapidio: dereferencing an error pointer Original patch: https://lkml.org/lkml/2016/8/4/32 If riocm_ch_alloc() fails then we end up dereferencing the error pointer. The problem is that we're not unwinding in the reverse order from how we allocate things so it gets confusing. I've changed this around so now "ch" is NULL when we are done with it after we call riocm_put_channel(). That way we can check if it's NULL and avoid calling riocm_put_channel() on it twice. I renamed err_nodev to err_put_new_ch so that it better reflects what the goto does. Then because we had flipping things around, it means we don't neeed to initialize the pointers to NULL and we can remove an if statement and pull things in an indent level. 
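Illustrative aside, not part of the patch: the rework boils down to the usual goto-unwind shape, releasing resources in the reverse order they were acquired and clearing a handle as soon as ownership is dropped so no later error path can release it twice. Below is a minimal standalone C sketch of that shape; res_get()/res_put() are stand-ins invented for the example, not rapidio APIs.

#include <stdio.h>
#include <stdlib.h>

struct res { const char *name; };

static struct res *res_get(const char *name)
{
	struct res *r = malloc(sizeof(*r));

	if (r)
		r->name = name;
	return r;
}

static void res_put(struct res *r)
{
	printf("put %s\n", r->name);
	free(r);
}

static int do_accept(int fail_later)
{
	struct res *ch, *new_ch;
	int err = 0;

	ch = res_get("ch");
	if (!ch)
		return -1;

	new_ch = res_get("new_ch");
	if (!new_ch) {
		err = -1;
		goto err_put_ch;
	}

	/* Done with "ch" early, as in the patch: drop it and clear the
	 * pointer so no later error path can put it a second time. */
	res_put(ch);
	ch = NULL;

	if (fail_later) {
		err = -1;
		goto err_put_new_ch;	/* unwind the newest resource first */
	}

	res_put(new_ch);
	return 0;

err_put_new_ch:
	res_put(new_ch);
err_put_ch:
	if (ch)
		res_put(ch);
	return err;
}

int main(void)
{
	return do_accept(1) ? 1 : 0;
}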
Link: http://lkml.kernel.org/r/20160805152406.20713-1-alexandre.bounine@idt.com Signed-off-by: Dan Carpenter Signed-off-by: Alexandre Bounine Cc: Matt Porter Cc: Andre van Herk Cc: Barry Wood Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index cecc15a..3fa17ac 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch) static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, long timeout) { - struct rio_channel *ch = NULL; - struct rio_channel *new_ch = NULL; + struct rio_channel *ch; + struct rio_channel *new_ch; struct conn_req *req; struct cm_peer *peer; int found = 0; @@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, spin_unlock_bh(&ch->lock); riocm_put_channel(ch); + ch = NULL; kfree(req); down_read(&rdev_sem); @@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, if (!found) { /* If peer device object not found, simply ignore the request */ err = -ENODEV; - goto err_nodev; + goto err_put_new_ch; } new_ch->rdev = peer->rdev; @@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, *new_ch_id = new_ch->id; return new_ch; + +err_put_new_ch: + spin_lock_bh(&idr_lock); + idr_remove(&ch_idr, new_ch->id); + spin_unlock_bh(&idr_lock); + riocm_put_channel(new_ch); + err_put: - riocm_put_channel(ch); -err_nodev: - if (new_ch) { - spin_lock_bh(&idr_lock); - idr_remove(&ch_idr, new_ch->id); - spin_unlock_bh(&idr_lock); - riocm_put_channel(new_ch); - } + if (ch) + riocm_put_channel(ch); *new_ch_id = 0; return ERR_PTR(err); } -- cgit v0.10.2 From a545de5ce2ef3abc4db0b9331840acf59c8f9efa Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 10 Aug 2016 16:27:41 -0700 Subject: revert "ARM: keystone: dts: add psci command definition" Revert commit 51d5d12b8f3d ("ARM: keystone: dts: add psci command definition"), which was inadvertently added twice. Cc: Russell King - ARM Linux Cc: Vitaly Andrianov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi index 00cb314..e23f46d 100644 --- a/arch/arm/boot/dts/keystone.dtsi +++ b/arch/arm/boot/dts/keystone.dtsi @@ -70,14 +70,6 @@ cpu_on = <0x84000003>; }; - psci { - compatible = "arm,psci"; - method = "smc"; - cpu_suspend = <0x84000001>; - cpu_off = <0x84000002>; - cpu_on = <0x84000003>; - }; - soc { #address-cells = <1>; #size-cells = <1>; -- cgit v0.10.2 From 3b33719c9b741066f7d2bc6036409752f8e0478d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Aug 2016 16:27:44 -0700 Subject: thp: move shmem_huge_enabled() outside of SYSFS ifdef The newly introduced shmem_huge_enabled() function has two definitions, but neither of them is visible if CONFIG_SYSFS is disabled, leading to a build error: mm/khugepaged.o: In function `khugepaged': khugepaged.c:(.text.khugepaged+0x3ca): undefined reference to `shmem_huge_enabled' This changes the #ifdef guards around the definition to match those that are used in the header file. Fixes: e496cf3d7821 ("thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE") Link: http://lkml.kernel.org/r/20160809123638.1357593-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Acked-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/shmem.c b/mm/shmem.c index 7f7748a..fd8b2b5 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute shmem_enabled_attr = __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); +#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE bool shmem_huge_enabled(struct vm_area_struct *vma) { struct inode *inode = file_inode(vma->vm_file); @@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma) return false; } } -#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ +#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ #else /* !CONFIG_SHMEM */ -- cgit v0.10.2 From 81cbcbc2d810c0ce49fba81f864302e1afe5ff27 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Wed, 10 Aug 2016 16:27:46 -0700 Subject: mm/page_alloc.c: fix wrong initialization when sysctl_min_unmapped_ratio changes Before resetting min_unmapped_pages, we need to initialize min_unmapped_pages rather than min_slab_pages. Fixes: a5f5f91da6 (mm: convert zone_reclaim to node_reclaim) Link: http://lkml.kernel.org/r/1470724248-26780-1-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Mel Gorman Cc: Vlastimil Babka Cc: Johannes Weiner Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ee744fa..9a92718 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6854,7 +6854,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, return rc; for_each_online_pgdat(pgdat) - pgdat->min_slab_pages = 0; + pgdat->min_unmapped_pages = 0; for_each_zone(zone) zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * -- cgit v0.10.2 From 6423aa8192c596848e1b23bd4193dc0924e7274d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Wed, 10 Aug 2016 16:27:49 -0700 Subject: mm/page_alloc.c: recalculate some of node threshold when on/offline memory Some of node threshold depends on number of managed pages in the node. When memory is going on/offline, it can be changed and we need to adjust them. Add recalculation to appropriate places and clean-up related functions for better maintenance. Link: http://lkml.kernel.org/r/1470724248-26780-2-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Mel Gorman Cc: Vlastimil Babka Cc: Johannes Weiner Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9a92718..ab2c0ff 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4757,6 +4757,8 @@ int local_memory_node(int node) } #endif +static void setup_min_unmapped_ratio(void); +static void setup_min_slab_ratio(void); #else /* CONFIG_NUMA */ static void set_zonelist_order(void) @@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; #ifdef CONFIG_NUMA zone->node = nid; - pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio) - / 100; - pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100; #endif zone->name = zone_names[j]; zone->zone_pgdat = pgdat; @@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void) setup_per_zone_wmarks(); refresh_zone_stat_thresholds(); setup_per_zone_lowmem_reserve(); + +#ifdef CONFIG_NUMA + setup_min_unmapped_ratio(); + setup_min_slab_ratio(); +#endif + return 0; } core_initcall(init_per_zone_wmark_min) @@ -6842,16 +6847,10 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, } #ifdef CONFIG_NUMA -int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos) +static void setup_min_unmapped_ratio(void) { - struct pglist_data *pgdat; + pg_data_t *pgdat; struct zone *zone; - int rc; - - rc = proc_dointvec_minmax(table, write, buffer, length, ppos); - if (rc) - return rc; for_each_online_pgdat(pgdat) pgdat->min_unmapped_pages = 0; @@ -6859,26 +6858,47 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, for_each_zone(zone) zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * sysctl_min_unmapped_ratio) / 100; - return 0; } -int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, + +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { - struct pglist_data *pgdat; - struct zone *zone; int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; + setup_min_unmapped_ratio(); + + return 0; +} + +static void setup_min_slab_ratio(void) +{ + pg_data_t *pgdat; + struct zone *zone; + for_each_online_pgdat(pgdat) pgdat->min_slab_pages = 0; for_each_zone(zone) zone->zone_pgdat->min_slab_pages += (zone->managed_pages * sysctl_min_slab_ratio) / 100; +} + +int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + int rc; + + rc = proc_dointvec_minmax(table, write, buffer, length, ppos); + if (rc) + return rc; + + setup_min_slab_ratio(); + return 0; } #endif -- cgit v0.10.2 From c8efc390c1e0eca195ae59a2f7cec46773620e0c Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 10 Aug 2016 16:27:52 -0700 Subject: mm, rmap: fix false positive VM_BUG() in page_add_file_rmap() PageTransCompound() doesn't distinguish THP from from any other type of compound pages. This can lead to false-positive VM_BUG_ON() in page_add_file_rmap() if called on compound page from a driver[1]. I think we can exclude such cases by checking if the page belong to a mapping. The VM_BUG_ON_PAGE() is downgraded to VM_WARN_ON_ONCE(). This path should not cause any harm to non-THP page, but good to know if we step on anything else. [1] http://lkml.kernel.org/r/c711e067-0bff-a6cb-3c37-04dfe77d2db1@redhat.com Link: http://lkml.kernel.org/r/20160810161345.GA67522@black.fi.intel.com Signed-off-by: Kirill A. 
Shutemov Reported-by: Laura Abbott Tested-by: Laura Abbott Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/rmap.c b/mm/rmap.c index 709bc83..d4f5606 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound) VM_BUG_ON_PAGE(!PageSwapBacked(page), page); __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); } else { - if (PageTransCompound(page)) { - VM_BUG_ON_PAGE(!PageLocked(page), page); + if (PageTransCompound(page) && page_mapping(page)) { + VM_WARN_ON_ONCE(!PageLocked(page)); + SetPageDoubleMap(compound_head(page)); if (PageMlocked(page)) clear_page_mlock(compound_head(page)); -- cgit v0.10.2 From 57dea93ac42d341e1fe902528b348ef6763fa485 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 10 Aug 2016 16:27:55 -0700 Subject: rmap: fix compound check logic in page_remove_file_rmap In page_remove_file_rmap(.) we have the following check: VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); This is meant to check for either HugeTLB pages or THP when a compound page is passed in. Unfortunately, if one disables CONFIG_TRANSPARENT_HUGEPAGE, then PageTransHuge(.) will always return false, provoking BUGs when one runs the libhugetlbfs test suite. This patch replaces PageTransHuge(), with PageHead() which will work for both HugeTLB and THP. Fixes: dd78fedde4b9 ("rmap: support file thp") Link: http://lkml.kernel.org/r/1470838217-5889-1-git-send-email-steve.capper@arm.com Signed-off-by: Steve Capper Acked-by: Kirill A. Shutemov Cc: Huang Shijie Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/rmap.c b/mm/rmap.c index d4f5606..1ef3640 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1304,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound) { int i, nr = 1; - VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); + VM_BUG_ON_PAGE(compound && !PageHead(page), page); lock_page_memcg(page); /* Hugepages are not counted in NR_FILE_MAPPED for now. */ -- cgit v0.10.2 From 6039892396d845b18228935561960441900cffca Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Aug 2016 16:27:58 -0700 Subject: mm/slub.c: run free_partial() outside of the kmem_cache_node->list_lock With debugobjects enabled and using SLAB_DESTROY_BY_RCU, when a kmem_cache_node is destroyed the call_rcu() may trigger a slab allocation to fill the debug object pool (__debug_object_init:fill_pool). 
Everywhere but during kmem_cache_destroy(), discard_slab() is performed outside of the kmem_cache_node->list_lock and avoids a lockdep warning about potential recursion: ============================================= [ INFO: possible recursive locking detected ] 4.8.0-rc1-gfxbench+ #1 Tainted: G U --------------------------------------------- rmmod/8895 is trying to acquire lock: (&(&n->list_lock)->rlock){-.-...}, at: [] get_partial_node.isra.63+0x47/0x430 but task is already holding lock: (&(&n->list_lock)->rlock){-.-...}, at: [] __kmem_cache_shutdown+0x54/0x320 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&(&n->list_lock)->rlock); lock(&(&n->list_lock)->rlock); *** DEADLOCK *** May be due to missing lock nesting notation 5 locks held by rmmod/8895: #0: (&dev->mutex){......}, at: driver_detach+0x42/0xc0 #1: (&dev->mutex){......}, at: driver_detach+0x50/0xc0 #2: (cpu_hotplug.dep_map){++++++}, at: get_online_cpus+0x2d/0x80 #3: (slab_mutex){+.+.+.}, at: kmem_cache_destroy+0x3c/0x220 #4: (&(&n->list_lock)->rlock){-.-...}, at: __kmem_cache_shutdown+0x54/0x320 stack backtrace: CPU: 6 PID: 8895 Comm: rmmod Tainted: G U 4.8.0-rc1-gfxbench+ #1 Hardware name: Gigabyte Technology Co., Ltd. H87M-D3H/H87M-D3H, BIOS F11 08/18/2015 Call Trace: __lock_acquire+0x1646/0x1ad0 lock_acquire+0xb2/0x200 _raw_spin_lock+0x36/0x50 get_partial_node.isra.63+0x47/0x430 ___slab_alloc.constprop.67+0x1a7/0x3b0 __slab_alloc.isra.64.constprop.66+0x43/0x80 kmem_cache_alloc+0x236/0x2d0 __debug_object_init+0x2de/0x400 debug_object_activate+0x109/0x1e0 __call_rcu.constprop.63+0x32/0x2f0 call_rcu+0x12/0x20 discard_slab+0x3d/0x40 __kmem_cache_shutdown+0xdb/0x320 shutdown_cache+0x19/0x60 kmem_cache_destroy+0x1ae/0x220 i915_gem_load_cleanup+0x14/0x40 [i915] i915_driver_unload+0x151/0x180 [i915] i915_pci_remove+0x14/0x20 [i915] pci_device_remove+0x34/0xb0 __device_release_driver+0x95/0x140 driver_detach+0xb6/0xc0 bus_remove_driver+0x53/0xd0 driver_unregister+0x27/0x50 pci_unregister_driver+0x25/0x70 i915_exit+0x1a/0x1e2 [i915] SyS_delete_module+0x193/0x1f0 entry_SYSCALL_64_fastpath+0x1c/0xac Fixes: 52b4b950b507 ("mm: slab: free kmem_cache_node after destroy sysfs file") Link: http://lkml.kernel.org/r/1470759070-18743-1-git-send-email-chris@chris-wilson.co.uk Reported-by: Dave Gordon Signed-off-by: Chris Wilson Reviewed-by: Vladimir Davydov Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Dmitry Safonov Cc: Daniel Vetter Cc: Dave Gordon Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/slub.c b/mm/slub.c index cead063..9adae58 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, */ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) { + LIST_HEAD(discard); struct page *page, *h; BUG_ON(irqs_disabled()); @@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) list_for_each_entry_safe(page, h, &n->partial, lru) { if (!page->inuse) { remove_partial(n, page); - discard_slab(s, page); + list_add(&page->lru, &discard); } else { list_slab_objects(s, page, "Objects remaining in %s on __kmem_cache_shutdown()"); } } spin_unlock_irq(&n->list_lock); + + list_for_each_entry_safe(page, h, &discard, lru) + discard_slab(s, page); } /* -- cgit v0.10.2 From 672ca65d9aa8578f382784fe73578cd499664828 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Wed, 10 Aug 2016 14:07:34 +0200 Subject: 
tipc: fix variable dereference before NULL check In commit cf6f7e1d5109 ("tipc: dump monitor attributes"), I dereferenced a pointer before checking if its valid. This is reported by static check Smatch as: net/tipc/monitor.c:733 tipc_nl_add_monitor_peer() warn: variable dereferenced before check 'mon' (see line 731) In this commit, we check for a valid monitor before proceeding with any other operation. Fixes: cf6f7e1d5109 ("tipc: dump monitor attributes") Reported-by: Dan Carpenter Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index b62caa1..ed97a58 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -728,12 +728,13 @@ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg, u32 bearer_id, u32 *prev_node) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); - struct tipc_peer *peer = mon->self; + struct tipc_peer *peer; if (!mon) return -EINVAL; read_lock_bh(&mon->lock); + peer = mon->self; do { if (*prev_node) { if (peer->addr == *prev_node) -- cgit v0.10.2 From dafa6b0db2d62164c5ef81a40312d5ba514126b9 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Wed, 10 Aug 2016 17:48:36 +0200 Subject: net: hns: fix typo in g_gmac_stats_string[] s/gamc/gmac/ Signed-off-by: Fabian Frederick Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 1235c7f..1e1eb92 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, - {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, + {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, -- cgit v0.10.2 From e7f851684efb3377e9c93aca7fae6e76212e5680 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 5 Aug 2016 23:37:34 -0700 Subject: megaraid_sas: Fix probing cards without io port Found one megaraid_sas HBA probe fails, [ 187.235190] scsi host2: Avago SAS based MegaRAID driver [ 191.112365] megaraid_sas 0000:89:00.0: BAR 0: can't reserve [io 0x0000-0x00ff] [ 191.120548] megaraid_sas 0000:89:00.0: IO memory region busy! and the card has resource like, [ 125.097714] pci 0000:89:00.0: [1000:005d] type 00 class 0x010400 [ 125.104446] pci 0000:89:00.0: reg 0x10: [io 0x0000-0x00ff] [ 125.110686] pci 0000:89:00.0: reg 0x14: [mem 0xce400000-0xce40ffff 64bit] [ 125.118286] pci 0000:89:00.0: reg 0x1c: [mem 0xce300000-0xce3fffff 64bit] [ 125.125891] pci 0000:89:00.0: reg 0x30: [mem 0xce200000-0xce2fffff pref] that does not io port resource allocated from BIOS, and kernel can not assign one as io port shortage. The driver is only looking for MEM, and should not fail. It turns out megasas_init_fw() etc are using bar index as mask. index 1 is used as mask 1, so that pci_request_selected_regions() is trying to request BAR0 instead of BAR1. Fix all related reference. Fixes: b6d5d8808b4c ("megaraid_sas: Use lowest memory bar for SR-IOV VF support") Signed-off-by: Yinghai Lu Acked-by: Kashyap Desai Signed-off-by: Martin K. 
Petersen diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2dab3dc..c1ed25a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Find first memory bar */ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); - if (pci_request_selected_regions(instance->pdev, instance->bar, + if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, "megasas: LSI")) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); return -EBUSY; @@ -5339,7 +5339,7 @@ fail_ready_state: iounmap(instance->reg_set); fail_ioremap: - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); return -EINVAL; } @@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) iounmap(instance->reg_set); - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ec83754..52d8bbf 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance) iounmap(instance->reg_set); - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** -- cgit v0.10.2 From 0d7826ddded60dbe36c451b462c24387e2727275 Mon Sep 17 00:00:00 2001 From: Brian King Date: Wed, 10 Aug 2016 22:45:57 -0400 Subject: ipr: Fix sync scsi scan Commit b195d5e2bffd ("ipr: Wait to do async scan until scsi host is initialized") fixed async scan for ipr, but broke sync scan. This fixes sync scan back up. Signed-off-by: Brian King Tested-by: Michael Ellerman Signed-off-by: Martin K. Petersen diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index bf85974..17d04c7 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -10410,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) __ipr_remove(pdev); return rc; } + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + ioa_cfg->scan_enabled = 1; + schedule_work(&ioa_cfg->work_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); - scsi_scan_host(ioa_cfg->host); ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { @@ -10421,10 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) } } - spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - ioa_cfg->scan_enabled = 1; - schedule_work(&ioa_cfg->work_q); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + scsi_scan_host(ioa_cfg->host); + return 0; } -- cgit v0.10.2 From 4b5b9ba553f9aa5f484ab972fc9b58061885ceca Mon Sep 17 00:00:00 2001 From: Martynas Pumputis Date: Tue, 9 Aug 2016 16:24:50 +0100 Subject: openvswitch: do not ignore netdev errors when creating tunnel vports The creation of a tunnel vport (geneve, gre, vxlan) brings up a corresponding netdev, a multi-step operation which can fail. For example, changing a vxlan vport's netdev state to 'up' binds the vport's socket to a UDP port - if the binding fails (e.g.
due to the port being in use), the error is currently ignored giving the appearance that the tunnel vport creation completed successfully. Signed-off-by: Martynas Pumputis Acked-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index 1a1fcec..5aaf3ba 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c @@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms) return ERR_CAST(dev); } - dev_change_flags(dev, dev->flags | IFF_UP); + err = dev_change_flags(dev, dev->flags | IFF_UP); + if (err < 0) { + rtnl_delete_link(dev); + rtnl_unlock(); + ovs_vport_free(vport); + goto error; + } + rtnl_unlock(); return vport; error: diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 7f8897f..0e72d95 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms) struct net *net = ovs_dp_get_net(parms->dp); struct net_device *dev; struct vport *vport; + int err; vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms); if (IS_ERR(vport)) @@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms) return ERR_CAST(dev); } - dev_change_flags(dev, dev->flags | IFF_UP); - rtnl_unlock(); + err = dev_change_flags(dev, dev->flags | IFF_UP); + if (err < 0) { + rtnl_delete_link(dev); + rtnl_unlock(); + ovs_vport_free(vport); + return ERR_PTR(err); + } + rtnl_unlock(); return vport; } diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 5eb7694..7eb955e 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) return ERR_CAST(dev); } - dev_change_flags(dev, dev->flags | IFF_UP); + err = dev_change_flags(dev, dev->flags | IFF_UP); + if (err < 0) { + rtnl_delete_link(dev); + rtnl_unlock(); + ovs_vport_free(vport); + goto error; + } + rtnl_unlock(); return vport; error: -- cgit v0.10.2 From 851da9ac53a5e7121f7c38a052f74b574dd73ec3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Aug 2016 23:54:09 +0200 Subject: drm/mediatek: add COMMON_CLK dependency On kernel builds without COMMON_CLK, the newly added mediatek drm driver fails to build: drivers/gpu/drm/mediatek/mtk_mipi_tx.c:130:16: error: field 'pll_hw' has incomplete type struct clk_hw pll_hw; ^~~~~~ In file included from ../include/linux/clk.h:16:0, from ../drivers/gpu/drm/mediatek/mtk_mipi_tx.c:14: drivers/gpu/drm/mediatek/mtk_mipi_tx.c: In function 'mtk_mipi_tx_from_clk_hw': include/linux/kernel.h:831:48: error: initialization from incompatible pointer type [-Werror=incompatible-pointer-types] const typeof( ((type *)0)->member ) *__mptr = (ptr); \ ^ /drivers/gpu/drm/mediatek/mtk_mipi_tx.c:136:9: note: in expansion of macro 'container_of' return container_of(hw, struct mtk_mipi_tx, pll_hw); ^~~~~~~~~~~~ drivers/gpu/drm/mediatek/mtk_mipi_tx.c: At top level: drivers/gpu/drm/mediatek/mtk_mipi_tx.c:302:21: error: variable 'mtk_mipi_tx_pll_ops' has initializer but incomplete type static const struct clk_ops mtk_mipi_tx_pll_ops = { This adds the required Kconfig dependency. 
Signed-off-by: Arnd Bergmann Acked-by: Philipp Zabel Link: https://patchwork.kernel.org/patch/9069061/ Signed-off-by: Philipp Zabel diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 23ac804..5c21637 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -2,6 +2,7 @@ config DRM_MEDIATEK tristate "DRM Support for Mediatek SoCs" depends on DRM depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) + depends on COMMON_CLK select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DSI -- cgit v0.10.2 From b5db361eb7746cc9ada51554a6b996170c439f0b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Aug 2016 23:54:10 +0200 Subject: drm/mediatek: add CONFIG_OF dependency The mediatek DRM driver can be configured for compile testing with CONFIG_OF disabled, but then fails to link: drivers/gpu/built-in.o: In function `mtk_drm_bind': analogix_dp_reg.c:(.text+0x52888): undefined reference to `of_find_device_by_node' analogix_dp_reg.c:(.text+0x52930): undefined reference to `of_find_device_by_node' This adds an explicit Kconfig dependency. Signed-off-by: Arnd Bergmann Link: https://patchwork.kernel.org/patch/9120871/ Signed-off-by: Philipp Zabel diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 5c21637..96ebf8b 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -3,6 +3,7 @@ config DRM_MEDIATEK depends on DRM depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) depends on COMMON_CLK + depends on OF select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DSI -- cgit v0.10.2 From 306d674a2ddd565adde7d9d6703157105c8444a6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Aug 2016 23:54:11 +0200 Subject: drm/mediatek: add ARM_SMCCC dependency ARM SMCCC is only set for ARMv7 and ARMv8 CPUs, but we currently allow the driver to be build for older architecture levels as well, which results in a link failure: drivers/gpu/built-in.o: In function `mtk_hdmi_hw_make_reg_writable': :(.text+0x1e737c): undefined reference to `arm_smccc_smc' This adds a Kconfig dependency. The patch applies on my two previous fixes that are not yet applied, so please apply all three to get randconfig builds to work correctly. Signed-off-by: Arnd Bergmann Fixes: 8f83f26891e1 ("drm/mediatek: Add HDMI support") Reviewed-by: Matthias Brugger Signed-off-by: Philipp Zabel diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 96ebf8b..294de45 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -3,6 +3,7 @@ config DRM_MEDIATEK depends on DRM depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) depends on COMMON_CLK + depends on HAVE_ARM_SMCCC depends on OF select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER -- cgit v0.10.2 From 12beb346710b766b8e74a7a3ac8165835bd68def Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Aug 2016 22:47:59 +0200 Subject: Merge tag 'pxa-fixes-v4.8' of https://github.com/rjarzmik/linux into randconfig-4.8 This is the pxa changes for v4.8 cycle. This is a tiny fix couple to enable changes in includes in gpio API without breaking pxa boards. 
* tag 'pxa-fixes-v4.8' of https://github.com/rjarzmik/linux: ARM: pxa: add module.h for corgi symbol_get/symbol_put usage ARM: pxa: add module.h for spitz symbol_get/symbol_put usage diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index dc109dc..10bfdb1 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -13,6 +13,7 @@ */ #include +#include /* symbol_get ; symbol_put */ #include #include #include diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 1080580..2c150bf 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -13,6 +13,7 @@ */ #include +#include /* symbol_get ; symbol_put */ #include #include #include -- cgit v0.10.2 From af7752106e4f12b4ee47b4eca3e7ba4bcec6e7e5 Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Tue, 9 Aug 2016 15:58:48 +0200 Subject: s390/dasd: fix failing CUIR assignment under LPAR On LPAR the read message buffer command should be executed on the path it was received on otherwise there is a chance that the CUIR assignment might be faulty and the wrong channel path is set online/offline. Fix by setting the path mask accordingly. On z/VM we might not be able to do I/O on this path but there it does not matter on which path the read message buffer command is executed. Therefor implement a retry with an open path mask. Signed-off-by: Stefan Haberland diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fd2eff4..98bbec4 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, return PTR_ERR(cqr); } + cqr->lpm = lpum; +retry: cqr->startdev = device; cqr->memdev = device; cqr->block = NULL; @@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, (prssdp + 1); memcpy(messages, message_buf, sizeof(struct dasd_rssd_messages)); + } else if (cqr->lpm) { + /* + * on z/VM we might not be able to do I/O on the requested path + * but instead we get the required information on any path + * so retry with open path mask + */ + cqr->lpm = 0; + goto retry; } else DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "Reading messages failed with rc=%d\n" -- cgit v0.10.2 From 62d16b5a3fca4d186e13215e0d7d2f6d36191796 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Sat, 6 Aug 2016 12:20:39 +0200 Subject: x86/mm/kaslr: Fix -Wformat-security warning debug_putstr() is used to output strings without using printf-like formatting but debug_putstr(v) is defined as early_printk(v) in arch/x86/lib/kaslr.c. This makes clang reports the following warning when building with -Wformat-security: arch/x86/lib/kaslr.c:57:15: warning: format string is not a string literal (potentially insecure) [-Wformat-security] debug_putstr(purpose); ^~~~~~~ Fix this by using "%s" in early_printk(). Signed-off-by: Nicolas Iooss Acked-by: Kees Cook Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160806102039.27221-1-nicolas.iooss_linux@m4x.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c index f7dfeda..121f59c 100644 --- a/arch/x86/lib/kaslr.c +++ b/arch/x86/lib/kaslr.c @@ -19,7 +19,7 @@ #include #include -#define debug_putstr(v) early_printk(v) +#define debug_putstr(v) early_printk("%s", v) #define has_cpuflag(f) boot_cpu_has(f) #define get_boot_seed() kaslr_offset() #endif -- cgit v0.10.2 From f9bcf1e0e0145323ba2cf72ecad5264ff3883eb1 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 11 Aug 2016 13:36:35 +0800 Subject: sched/cputime: Fix steal time accounting Commit: 57430218317 ("sched/cputime: Count actually elapsed irq & softirq time") ... didn't take steal time into consideration with passing the noirqtime kernel parameter. As Paolo pointed out before: | Why not? If idle=poll, for example, any time the guest is suspended (and | thus cannot poll) does count as stolen time. This patch fixes it by reducing steal time from idle time accounting when the noirqtime parameter is true. The average idle time drops from 56.8% to 54.75% for nohz idle kvm guest(noirqtime, idle=poll, four vCPUs running on one pCPU). Signed-off-by: Wanpeng Li Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Paolo Bonzini Cc: Peter Zijlstra (Intel) Cc: Peter Zijlstra Cc: Radim Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1470893795-3527-1-git-send-email-wanpeng.li@hotmail.com Signed-off-by: Ingo Molnar diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 1934f65..8b9bcc5 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -508,13 +508,20 @@ void account_process_tick(struct task_struct *p, int user_tick) */ void account_idle_ticks(unsigned long ticks) { - + cputime_t cputime, steal; if (sched_clock_irqtime) { irqtime_account_idle_ticks(ticks); return; } - account_idle_time(jiffies_to_cputime(ticks)); + cputime = cputime_one_jiffy; + steal = steal_account_process_time(cputime); + + if (steal >= cputime) + return; + + cputime -= steal; + account_idle_time(cputime); } /* -- cgit v0.10.2 From ace7fab7a6cdd363a615ec537f2aa94dbc761ee2 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 10 Aug 2016 10:23:25 -0700 Subject: x86/mm: Fix swap entry comment and macro A recent patch changed the format of a swap PTE. The comment explaining the format of the swap PTE is wrong about the bits used for the swap type field. Amusingly, the ASCII art and the patch description are correct, but the comment itself is wrong. As I was looking at this, I also noticed that the SWP_OFFSET_FIRST_BIT has an off-by-one error. This does not really hurt anything. It just wasted a bit of space in the PTE, giving us 2^59 bytes of addressable space in our swapfiles instead of 2^60. But, it doesn't match with the comments, and it wastes a bit of space, so fix it. Signed-off-by: Dave Hansen Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Luis R. 
Rodriguez Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Toshi Kani Fixes: 00839ee3b299 ("x86/mm: Move swap offset/type up in PTE to work around erratum") Link: http://lkml.kernel.org/r/20160810172325.E56AD7DA@viggo.jf.intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 7e8ec7a..1cc82ec 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -145,7 +145,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } * * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names - * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry + * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry * * G (8) is aliased and used as a PROT_NONE indicator for * !present ptes. We need to start storing swap entries above @@ -156,7 +156,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) #define SWP_TYPE_BITS 5 /* Place the offset above the type: */ -#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1) +#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS) #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) -- cgit v0.10.2 From 82ba4faca1bffad429f15c90c980ffd010366c25 Mon Sep 17 00:00:00 2001 From: Aaron Lu Date: Thu, 11 Aug 2016 15:44:30 +0800 Subject: x86/irq: Do not substract irq_tlb_count from irq_call_count Since commit: 52aec3308db8 ("x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR") the TLB remote shootdown is done through call function vector. That commit didn't take care of irq_tlb_count, which a later commit: fd0f5869724f ("x86: Distinguish TLB shootdown interrupts from other functions call interrupts") ... tried to fix. The fix assumes every increase of irq_tlb_count has a corresponding increase of irq_call_count. So the irq_call_count is always bigger than irq_tlb_count and we could substract irq_tlb_count from irq_call_count. Unfortunately this is not true for the smp_call_function_single() case. The IPI is only sent if the target CPU's call_single_queue is empty when adding a csd into it in generic_exec_single. That means if two threads are both adding flush tlb csds to the same CPU's call_single_queue, only one IPI is sent. In other words, the irq_call_count is incremented by 1 but irq_tlb_count is incremented by 2. Over time, irq_tlb_count will be bigger than irq_call_count and the substract will produce a very large irq_call_count value due to overflow. Considering that: 1) it's not worth to send more IPIs for the sake of accurate counting of irq_call_count in generic_exec_single(); 2) it's not easy to tell if the call function interrupt is for TLB shootdown in __smp_call_function_single_interrupt(). Not to exclude TLB shootdown from call function count seems to be the simplest fix and this patch just does that. This bug was found by LKP's cyclic performance regression tracking recently with the vm-scalability test suite. I have bisected to commit: 3dec0ba0be6a ("mm/rmap: share the i_mmap_rwsem") This commit didn't do anything wrong but revealed the irq_call_count problem. IIUC, the commit makes rwc->remap_one in rmap_walk_file concurrent with multiple threads. When remap_one is try_to_unmap_one(), then multiple threads could queue flush TLB to the same CPU but only one IPI will be sent. Since the commit was added in Linux v3.19, the counting problem only shows up from v3.19 onwards. 
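To make the wraparound concrete: once irq_tlb_count outpaces irq_call_count on a CPU, the unsigned subtraction used for the /proc/interrupts "CAL" row wraps to a huge number. A minimal userspace illustration of the arithmetic (the counter values are invented; this is not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* two flush-TLB csds were queued, but only one IPI was sent */
		unsigned int irq_call_count = 10;
		unsigned int irq_tlb_count  = 12;

		/* the old display math underflows and wraps around */
		printf("CAL: %10u\n", irq_call_count - irq_tlb_count);
		return 0;
	}

Running this prints 4294967294, which is exactly the kind of bogus "Function call interrupts" count the patch below removes by no longer subtracting the TLB counter.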
Signed-off-by: Aaron Lu Cc: Alex Shi Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Davidlohr Bueso Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Huang Ying Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tomoki Sekiyama Link: http://lkml.kernel.org/r/20160811074430.GA18163@aaronlu.sh.intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 7178043..59405a2 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -22,10 +22,6 @@ typedef struct { #ifdef CONFIG_SMP unsigned int irq_resched_count; unsigned int irq_call_count; - /* - * irq_tlb_count is double-counted in irq_call_count, so it must be - * subtracted from irq_call_count when displaying irq_call_count - */ unsigned int irq_tlb_count; #endif #ifdef CONFIG_X86_THERMAL_VECTOR diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 61521dc..9f669fd 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_puts(p, " Rescheduling interrupts\n"); seq_printf(p, "%*s: ", prec, "CAL"); for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_call_count - - irq_stats(j)->irq_tlb_count); + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); seq_puts(p, " Function call interrupts\n"); seq_printf(p, "%*s: ", prec, "TLB"); for_each_online_cpu(j) -- cgit v0.10.2 From 007b756053386af079ba963a8f5817ac651c7c59 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2016 02:29:13 -0700 Subject: x86/boot: Run reserve_bios_regions() after we initialize the memory map reserve_bios_regions() is a quirk that reserves memory that we might otherwise think is available. There's no need to run it so early, and running it before we have the memory map initialized with its non-quirky inputs makes it hard to make reserve_bios_regions() more intelligent. Move it right after we populate the memblock state. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mario Limonciello Cc: Matt Fleming Cc: Matthew Garrett Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/59f58618911005c799c6c9979ce6ae4881d907c2.1470821230.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 2dda0bc..f16c55b 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -25,8 +25,6 @@ static void __init i386_default_early_setup(void) /* Initialize 32bit specific setup functions */ x86_init.resources.reserve_resources = i386_reserve_resources; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; - - reserve_bios_regions(); } asmlinkage __visible void __init i386_start_kernel(void) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 99d48e7..54a2372 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -183,7 +183,6 @@ void __init x86_64_start_reservations(char *real_mode_data) copy_bootdata(__va(real_mode_data)); x86_early_init_platform_quirks(); - reserve_bios_regions(); switch (boot_params.hdr.hardware_subarch) { case X86_SUBARCH_INTEL_MID: diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 95cf31c..bf780e0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1101,6 +1101,8 @@ void __init setup_arch(char **cmdline_p) efi_find_mirror(); } + reserve_bios_regions(); + /* * The EFI specification says that boot service code won't be called * after ExitBootServices(). This is, in fact, a lie. -- cgit v0.10.2 From 18bc7bd523e0fc5be8d76bf84bde733a97a8c375 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2016 02:29:14 -0700 Subject: x86/boot: Synchronize trampoline_cr4_features and mmu_cr4_features directly The initialization process for trampoline_cr4_features and mmu_cr4_features was confusing. The intent is for mmu_cr4_features and *trampoline_cr4_features to stay in sync, but trampoline_cr4_features is NULL until setup_real_mode() runs. The old code synchronized *trampoline_cr4_features *twice*, once in setup_real_mode() and once in setup_arch(). It also initialized mmu_cr4_features in setup_real_mode(), which causes the actual value of mmu_cr4_features to potentially depend on when setup_real_mode() is called. With this patch, mmu_cr4_features is initialized directly in setup_arch(), and *trampoline_cr4_features is synchronized to mmu_cr4_features when the trampoline is set up. After this patch, it should be safe to defer setup_real_mode(). Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mario Limonciello Cc: Matt Fleming Cc: Matthew Garrett Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/d48a263f9912389b957dd495a7127b009259ffe0.1470821230.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index bf780e0..95c8c9c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1131,6 +1131,16 @@ void __init setup_arch(char **cmdline_p) early_trap_pf_init(); + /* + * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features) + * with the current CR4 value. This may not be necessary, but + * auditing all the early-boot CR4 manipulation would be needed to + * rule it out. + */ + if (boot_cpu_data.cpuid_level >= 0) + /* A CPU has %cr4 if and only if it has CPUID. 
*/ + mmu_cr4_features = __read_cr4(); + setup_real_mode(); memblock_set_current_limit(get_max_mapped()); @@ -1180,13 +1190,6 @@ void __init setup_arch(char **cmdline_p) kasan_init(); - if (boot_cpu_data.cpuid_level >= 0) { - /* A CPU has %cr4 if and only if it has CPUID */ - mmu_cr4_features = __read_cr4(); - if (trampoline_cr4_features) - *trampoline_cr4_features = mmu_cr4_features; - } - #ifdef CONFIG_X86_32 /* sync back kernel address range */ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 705e3ff..c5bdc4e 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -4,6 +4,7 @@ #include #include #include +#include struct real_mode_header *real_mode_header; u32 *trampoline_cr4_features; @@ -84,7 +85,7 @@ void __init setup_real_mode(void) trampoline_header->start = (u64) secondary_startup_64; trampoline_cr4_features = &trampoline_header->cr4; - *trampoline_cr4_features = __read_cr4(); + *trampoline_cr4_features = mmu_cr4_features; trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); trampoline_pgd[0] = trampoline_pgd_entry.pgd; -- cgit v0.10.2 From d0de0f685db7faf2ae4597a39a59996dd84e18c7 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2016 02:29:15 -0700 Subject: x86/boot: Defer setup_real_mode() to early_initcall time There's no need to run setup_real_mode() as early as we run it. Defer it to the same early_initcall that sets up the page permissions for the real mode code. This should be a code size reduction. More importantly, it give us a longer window in which we can allocate the real mode trampoline. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mario Limonciello Cc: Matt Fleming Cc: Matthew Garrett Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/fd62f0da4f79357695e9bf3e365623736b05f119.1470821230.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 9c6b890..8d67777 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -59,6 +59,5 @@ extern unsigned char secondary_startup_64[]; #endif void reserve_real_mode(void); -void setup_real_mode(void); #endif /* _ARCH_X86_REALMODE_H */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 95c8c9c..0fa60f5 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1141,8 +1141,6 @@ void __init setup_arch(char **cmdline_p) /* A CPU has %cr4 if and only if it has CPUID. */ mmu_cr4_features = __read_cr4(); - setup_real_mode(); - memblock_set_current_limit(get_max_mapped()); /* diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index c5bdc4e..747b71e 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -30,7 +30,7 @@ void __init reserve_real_mode(void) base, (unsigned long long)mem, size); } -void __init setup_real_mode(void) +static void __init setup_real_mode(void) { u16 real_mode_seg; const u32 *rel; @@ -101,7 +101,7 @@ void __init setup_real_mode(void) * need to mark it executable at do_pre_smp_initcalls() at least, * thus run it as a early_initcall(). 
*/ -static int __init set_real_mode_permissions(void) +static void __init set_real_mode_permissions(void) { unsigned char *base = (unsigned char *) real_mode_header; size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); @@ -120,7 +120,16 @@ static int __init set_real_mode_permissions(void) set_memory_nx((unsigned long) base, size >> PAGE_SHIFT); set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT); set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); +} + +static int __init init_real_mode(void) +{ + if (!real_mode_header) + panic("Real mode trampoline was not allocated"); + + setup_real_mode(); + set_real_mode_permissions(); return 0; } -early_initcall(set_real_mode_permissions); +early_initcall(init_real_mode); -- cgit v0.10.2 From 5ff3e2c3c3eebe13967d81ad1f23b9468fefea81 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2016 02:29:16 -0700 Subject: x86/boot: Rework reserve_real_mode() to allow multiple tries If reserve_real_mode() fails, panicing immediately means we're doomed. Make it safe to try more than once to allocate the trampoline: - Degrade a failure from panic() to pr_info(). (If we make it to setup_real_mode() without reserving the trampoline, we'll panic them.) - Factor out helpers so that platform code can supply a specific address to try. - Warn if reserve_real_mode() is called after we're done with the memblock allocator. If that were to happen, we would behave unpredictably. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mario Limonciello Cc: Matt Fleming Cc: Matthew Garrett Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/876e383038f3e9971aa72fd20a4f5da05f9d193d.1470821230.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 8d67777..b2988c0 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -58,6 +58,15 @@ extern unsigned char boot_gdt[]; extern unsigned char secondary_startup_64[]; #endif +static inline size_t real_mode_size_needed(void) +{ + if (real_mode_header) + return 0; /* already allocated. */ + + return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); +} + +void set_real_mode_mem(phys_addr_t mem, size_t size); void reserve_real_mode(void); #endif /* _ARCH_X86_REALMODE_H */ diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 747b71e..5db706f1 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -1,4 +1,5 @@ #include +#include #include #include @@ -12,22 +13,34 @@ u32 *trampoline_cr4_features; /* Hold the pgd entry used on booting additional CPUs */ pgd_t trampoline_pgd_entry; +void __init set_real_mode_mem(phys_addr_t mem, size_t size) +{ + void *base = __va(mem); + + real_mode_header = (struct real_mode_header *) base; + printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", + base, (unsigned long long)mem, size); +} + void __init reserve_real_mode(void) { phys_addr_t mem; - unsigned char *base; - size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); + size_t size = real_mode_size_needed(); + + if (!size) + return; + + WARN_ON(slab_is_available()); /* Has to be under 1M so we can execute real-mode AP code. 
*/ mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); - if (!mem) - panic("Cannot allocate trampoline\n"); + if (!mem) { + pr_info("No sub-1M memory is available for the trampoline\n"); + return; + } - base = __va(mem); memblock_reserve(mem, size); - real_mode_header = (struct real_mode_header *) base; - printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", - base, (unsigned long long)mem, size); + set_real_mode_mem(mem, size); } static void __init setup_real_mode(void) -- cgit v0.10.2 From 5bc653b7318217c54244a14f248f1f07abe0a865 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2016 02:29:17 -0700 Subject: x86/efi: Allocate a trampoline if needed in efi_free_boot_services() On my Dell XPS 13 9350 with firmware 1.4.4 and SGX on, if I boot Fedora 24's grub2-efi off a hard disk, my first 1MB of RAM looks like: efi: mem00: [Runtime Data |RUN| | | | | | | |WB|WT|WC|UC] range=[0x0000000000000000-0x0000000000000fff] (0MB) efi: mem01: [Boot Data | | | | | | | | |WB|WT|WC|UC] range=[0x0000000000001000-0x0000000000027fff] (0MB) efi: mem02: [Loader Data | | | | | | | | |WB|WT|WC|UC] range=[0x0000000000028000-0x0000000000029fff] (0MB) efi: mem03: [Reserved | | | | | | | | |WB|WT|WC|UC] range=[0x000000000002a000-0x000000000002bfff] (0MB) efi: mem04: [Runtime Data |RUN| | | | | | | |WB|WT|WC|UC] range=[0x000000000002c000-0x000000000002cfff] (0MB) efi: mem05: [Loader Data | | | | | | | | |WB|WT|WC|UC] range=[0x000000000002d000-0x000000000002dfff] (0MB) efi: mem06: [Conventional Memory| | | | | | | | |WB|WT|WC|UC] range=[0x000000000002e000-0x0000000000057fff] (0MB) efi: mem07: [Reserved | | | | | | | | |WB|WT|WC|UC] range=[0x0000000000058000-0x0000000000058fff] (0MB) efi: mem08: [Conventional Memory| | | | | | | | |WB|WT|WC|UC] range=[0x0000000000059000-0x000000000009ffff] (0MB) My EBDA is at 0x2c000, which blocks off everything from 0x2c000 and up, and my trampoline is 0x6000 bytes (6 pages), so it doesn't fit in the loader data range at 0x28000. Without this patch, it panics due to a failure to allocate the trampoline. With this patch, it works: [ +0.001744] Base memory trampoline at [ffff880000001000] 1000 size 24576 Signed-off-by: Andy Lutomirski Reviewed-by: Matt Fleming Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mario Limonciello Cc: Matt Fleming Cc: Matthew Garrett Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/998c77b3bf709f3dfed85cb30701ed1a5d8a438b.1470821230.git.luto@kernel.org Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 4480c06..89d1146 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -254,6 +254,7 @@ void __init efi_free_boot_services(void) for_each_efi_memory_desc(md) { unsigned long long start = md->phys_addr; unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; + size_t rm_size; if (md->type != EFI_BOOT_SERVICES_CODE && md->type != EFI_BOOT_SERVICES_DATA) @@ -263,6 +264,26 @@ void __init efi_free_boot_services(void) if (md->attribute & EFI_MEMORY_RUNTIME) continue; + /* + * Nasty quirk: if all sub-1MB memory is used for boot + * services, we can get here without having allocated the + * real mode trampoline. It's too late to hand boot services + * memory back to the memblock allocator, so instead + * try to manually allocate the trampoline if needed. 
+ * + * I've seen this on a Dell XPS 13 9350 with firmware + * 1.4.4 with SGX enabled booting Linux via Fedora 24's + * grub2-efi on a hard disk. (And no, I don't know why + * this happened, but Linux should still try to boot rather + * panicing early.) + */ + rm_size = real_mode_size_needed(); + if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { + set_real_mode_mem(start, rm_size); + start += rm_size; + size -= rm_size; + } + free_bootmem_late(start, size); } -- cgit v0.10.2 From f72075c9eda8a43aeea2f9dbb8d187afd4a76f0b Mon Sep 17 00:00:00 2001 From: Alex Thorlton Date: Thu, 11 Aug 2016 11:41:59 +0100 Subject: x86/platform/uv: Skip UV runtime services mapping in the efi_runtime_disabled case This problem has actually been in the UV code for a while, but we didn't catch it until recently, because we had been relying on EFI_OLD_MEMMAP to allow our systems to boot for a period of time. We noticed the issue when trying to kexec a recent community kernel, where we hit this NULL pointer dereference in efi_sync_low_kernel_mappings(): [ 0.337515] BUG: unable to handle kernel NULL pointer dereference at 0000000000000880 [ 0.346276] IP: [] efi_sync_low_kernel_mappings+0x5d/0x1b0 The problem doesn't show up with EFI_OLD_MEMMAP because we skip the chunk of setup_efi_state() that sets the efi_loader_signature for the kexec'd kernel. When the kexec'd kernel boots, it won't set EFI_BOOT in setup_arch, so we completely avoid the bug. We always kexec with noefi on the command line, so this shouldn't be an issue, but since we're not actually checking for efi_runtime_disabled in uv_bios_init(), we end up trying to do EFI runtime callbacks when we shouldn't be. This patch just adds a check for efi_runtime_disabled in uv_bios_init() so that we don't map in uv_systab when runtime_disabled == true. Signed-off-by: Alex Thorlton Signed-off-by: Matt Fleming Cc: # v4.7 Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Mike Travis Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1470912120-22831-2-git-send-email-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 66b2166..0df8a03 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -187,7 +187,8 @@ EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target); void uv_bios_init(void) { uv_systab = NULL; - if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) { + if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || + !efi.uv_systab || efi_runtime_disabled()) { pr_crit("UV: UVsystab: missing\n"); return; } -- cgit v0.10.2 From 6862e6ad95e984991a6ceec592cf67831658f928 Mon Sep 17 00:00:00 2001 From: Austin Christ Date: Thu, 11 Aug 2016 11:42:00 +0100 Subject: efi/capsule: Allocate whole capsule into virtual memory According to UEFI 2.6 section 7.5.3, the capsule should be in contiguous virtual memory and firmware may consume the capsule immediately. To correctly implement this functionality, the kernel driver needs to vmap the entire capsule at the time it is made available to firmware. The virtual allocation of the capsule update has been changed from kmap, which was only allocating the first page of the update, to vmap, and allocates the entire data payload. 
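The mapping change boils down to one call: kmap() makes a single page addressable, while vmap() builds one contiguous virtual range over the whole page array, which is what the firmware needs when it consumes the capsule. A minimal kernel-style sketch, assuming a populated pages[] array of count entries (not the driver's exact code):

	#include <linux/vmalloc.h>

	/* Map a capsule whose payload is scattered across 'count' pages. */
	static void *map_whole_capsule(struct page **pages, unsigned int count)
	{
		/*
		 * kmap(pages[0]) would expose only the first PAGE_SIZE bytes;
		 * vmap() covers the complete payload in one contiguous range.
		 */
		return vmap(pages, count, VM_MAP, PAGE_KERNEL);
	}

The caller is then responsible for vunmap() once the firmware has consumed the capsule, which is what the diff below does after efi_capsule_update() returns.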
Signed-off-by: Austin Christ Signed-off-by: Matt Fleming Reviewed-by: Matt Fleming Reviewed-by: Lee, Chun-Yi Cc: # v4.7 Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Borislav Petkov Cc: Brian Gerst Cc: Bryan O'Donoghue Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Kweh Hock Leong Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1470912120-22831-3-git-send-email-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index c99c24b..9ae6c11 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -16,6 +16,7 @@ #include #include #include +#include #define NO_FURTHER_WRITE_ACTION -1 @@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) int ret; void *cap_hdr_temp; - cap_hdr_temp = kmap(cap_info->pages[0]); + cap_hdr_temp = vmap(cap_info->pages, cap_info->index, + VM_MAP, PAGE_KERNEL); if (!cap_hdr_temp) { - pr_debug("%s: kmap() failed\n", __func__); + pr_debug("%s: vmap() failed\n", __func__); return -EFAULT; } ret = efi_capsule_update(cap_hdr_temp, cap_info->pages); - kunmap(cap_info->pages[0]); + vunmap(cap_hdr_temp); if (ret) { pr_err("%s: efi_capsule_update() failed\n", __func__); return ret; diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c index 53b9fd2..6eedff4 100644 --- a/drivers/firmware/efi/capsule.c +++ b/drivers/firmware/efi/capsule.c @@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule, * map the capsule described by @capsule with its data in @pages and * send it to the firmware via the UpdateCapsule() runtime service. * - * @capsule must be a virtual mapping of the first page in @pages - * (@pages[0]) in the kernel address space. That is, a - * capsule_header_t that describes the entire contents of the capsule + * @capsule must be a virtual mapping of the complete capsule update in the + * kernel address space, as the capsule can be consumed immediately. + * A capsule_header_t that describes the entire contents of the capsule * must be at the start of the first data page. * * Even though this function will validate that the firmware supports -- cgit v0.10.2 From 772ce81264b179c0e61340998e3b29e900b2fa6d Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 8 Aug 2016 21:50:51 +0900 Subject: usb: renesas_usbhs: Fix receiving data corrupt on R-Car Gen3 with dmac Since R-Car Gen3 SoC has the USB-DMAC, this driver should set dparam->has_usb_dmac to 1. Otherwise, behavior of this driver and the usb-dmac driver will be mismatch, then sometimes receiving data will be corrupt. 
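The fix itself is a one-line condition: both R-Car Gen2 and Gen3 pair the controller with a USB-DMAC, so the device-tree parsing path must set has_usb_dmac for either SoC type. Reduced to a standalone predicate for illustration (the UNKNOWN value is a stand-in, not a driver constant):

	#include <stdbool.h>

	enum usbhs_type {
		USBHS_TYPE_UNKNOWN,
		USBHS_TYPE_RCAR_GEN2,
		USBHS_TYPE_RCAR_GEN3,
	};

	/* Gen2 and Gen3 both integrate a USB-DMAC alongside the controller. */
	static bool usbhs_soc_has_usb_dmac(enum usbhs_type type)
	{
		return type == USBHS_TYPE_RCAR_GEN2 ||
		       type == USBHS_TYPE_RCAR_GEN3;
	}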
Fixes: de18757e272d ("usb: renesas_usbhs: add R-Car Gen3 power control") Cc: # v4.5+ Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8fbbc2d..ac67bab 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) if (gpio > 0) dparam->enable_gpio = gpio; - if (dparam->type == USBHS_TYPE_RCAR_GEN2) + if (dparam->type == USBHS_TYPE_RCAR_GEN2 || + dparam->type == USBHS_TYPE_RCAR_GEN3) dparam->has_usb_dmac = 1; return info; -- cgit v0.10.2 From 9ab967e6db7412b675ecbff80d5371d53c82cb2e Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 8 Aug 2016 21:50:52 +0900 Subject: usb: renesas_usbhs: clear the BRDYSTS in usbhsg_ep_enable() This patch fixes an issue that unexpected BRDY interruption happens when the usb_ep_{enable,disable}() are called with different direction. In this case, the driver will cause the following message: renesas_usbhs e6590000.usb: irq_ready run_error 1 : -16 This issue causes the followings: 1) A pipe is enabled as transmission 2) The pipe sent a data 3) The pipe is disabled and re-enabled as reception. 4) The pipe got a queue Since the driver doesn't clear the BRDYSTS flags after 2) above, the issue happens. If we add such clearing the flags into the driver, the code will become complicate. So, this patch clears the BRDYSTS flag of reception in usbhsg_ep_enable() to avoid complicate. Cc: # v4.1+ (usbhs_xxxsts_clear() is needed) Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 50f3363..92bc83b 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep, * use dmaengine if possible. * It will use pio handler if impossible. */ - if (usb_endpoint_dir_in(desc)) + if (usb_endpoint_dir_in(desc)) { pipe->handler = &usbhs_fifo_dma_push_handler; - else + } else { pipe->handler = &usbhs_fifo_dma_pop_handler; + usbhs_xxxsts_clear(priv, BRDYSTS, + usbhs_pipe_number(pipe)); + } ret = 0; } -- cgit v0.10.2 From 700aa7ff8d2c2b9cc669c99375e2ccd06d3cd38d Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 8 Aug 2016 21:50:53 +0900 Subject: usb: renesas_usbhs: Use dmac only if the pipe type is bulk This patch fixes an issue that isochronous transfer's data is possible to be lost as a workaround. Since this driver uses a workqueue to start the dmac, the transfer is possible to be delayed when system load is high. 
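The reasoning reduces to a simple rule: DMA submission is kicked off from a workqueue, so on a loaded system it may run after the isochronous service interval has passed and the packet is lost, whereas PIO starts the transfer immediately. An illustrative standalone helper (not the driver's code; the threshold name mirrors its pio_dma_border parameter):

	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * DMA setup is deferred to a worker, which can run too late for an
	 * isochronous endpoint's interval; PIO starts the transfer at once.
	 */
	static bool usbhs_prefer_pio(bool is_isoc, size_t len, size_t pio_dma_border)
	{
		return is_isoc || len < pio_dma_border;
	}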
Fixes: 6e4b74e4690d ("usb: renesas: fix scheduling in atomic context bug") Cc: # v3.4+ Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 280ed5f..857e783 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) /* use PIO if packet is less than pio_dma_border or pipe is DCP */ if ((len < usbhs_get_dparam(priv, pio_dma_border)) || - usbhs_pipe_is_dcp(pipe)) + usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) goto usbhsf_pio_prepare_push; /* check data length if this driver don't use USB-DMAC */ @@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, /* use PIO if packet is less than pio_dma_border or pipe is DCP */ if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || - usbhs_pipe_is_dcp(pipe)) + usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) goto usbhsf_pio_prepare_pop; fifo = usbhsf_get_dma_fifo(priv, pkt); -- cgit v0.10.2 From c526c62d565ea5a5bba9433f28756079734f430d Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Fri, 1 Jul 2016 15:33:28 +0800 Subject: usb: gadget: composite: fix dereference after null check coverify warning cdev->config is checked for null pointer at above code, so cdev->config might be null, fix it by adding null pointer check. Signed-off-by: Peter Chen Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index eb64848..a8ecc7a 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1913,6 +1913,8 @@ unknown: break; case USB_RECIP_ENDPOINT: + if (!cdev->config) + break; endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); list_for_each_entry(f, &cdev->config->functions, list) { if (test_bit(endp, f->endpoints)) -- cgit v0.10.2 From 88c09eacf560c3303d0ee8cf91b8b7ff7f000350 Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Fri, 1 Jul 2016 15:33:29 +0800 Subject: usb: gadget: u_ether: fix dereference after null check coverify warning dev->port_usb is checked for null pointer at above code, so dev->port_usb might be null, fix it by adding null pointer check. Signed-off-by: Peter Chen Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index a3f7e7c..5f562c1 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, /* Multi frame CDC protocols may store the frame for * later which is not a dropped frame. */ - if (dev->port_usb->supports_multi_frame) + if (dev->port_usb && + dev->port_usb->supports_multi_frame) goto multiframe; goto drop; } -- cgit v0.10.2 From cee51c33f52ebf673a088a428ac0fecc33ab77fa Mon Sep 17 00:00:00 2001 From: Winter Wang Date: Wed, 27 Jul 2016 10:03:19 +0800 Subject: usb: gadget: configfs: add mutex lock before unregister gadget There may be a race condition if f_fs calls unregister_gadget_item in ffs_closed() when unregister_gadget is called by UDC store at the same time. this leads to a kernel NULL pointer dereference: [ 310.644928] Unable to handle kernel NULL pointer dereference at virtual address 00000004 [ 310.645053] init: Service 'adbd' is being killed... 
[ 310.658938] pgd = c9528000 [ 310.662515] [00000004] *pgd=19451831, *pte=00000000, *ppte=00000000 [ 310.669702] Internal error: Oops: 817 [#1] PREEMPT SMP ARM [ 310.675211] Modules linked in: [ 310.678294] CPU: 0 PID: 1537 Comm: ->transport Not tainted 4.1.15-03725-g793404c #2 [ 310.685958] Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree) [ 310.692493] task: c8e24200 ti: c945e000 task.ti: c945e000 [ 310.697911] PC is at usb_gadget_unregister_driver+0xb4/0xd0 [ 310.703502] LR is at __mutex_lock_slowpath+0x10c/0x16c [ 310.708648] pc : [] lr : [] psr: 600f0113 [ 311.565585] [] (usb_gadget_unregister_driver) from [] (unregister_gadget_item+0x1c/0x34) [ 311.575426] [] (unregister_gadget_item) from [] (ffs_closed+0x8c/0x9c) [ 311.583702] [] (ffs_closed) from [] (ffs_data_reset+0xc/0xa0) [ 311.591194] [] (ffs_data_reset) from [] (ffs_data_closed+0x90/0xd0) [ 311.599208] [] (ffs_data_closed) from [] (ffs_ep0_release+0xc/0x14) [ 311.607224] [] (ffs_ep0_release) from [] (__fput+0x80/0x1d0) [ 311.614635] [] (__fput) from [] (task_work_run+0xb0/0xe8) [ 311.621788] [] (task_work_run) from [] (do_work_pending+0x7c/0xa4) [ 311.629718] [] (do_work_pending) from [] (work_pending+0xc/0x20) for functions using functionFS, i.e. android adbd will close /dev/usb-ffs/adb/ep0 when usb IO thread fails, but switch adb from on to off also triggers write "none" > UDC. These 2 operations both call unregister_gadget, which will lead to the panic above. add a mutex before calling unregister_gadget for api used in f_fs. Signed-off-by: Winter Wang Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 70cf347..f9237fe 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item) { struct gadget_info *gi = to_gadget_info(item); + mutex_lock(&gi->lock); unregister_gadget(gi); + mutex_unlock(&gi->lock); } EXPORT_SYMBOL_GPL(unregister_gadget_item); -- cgit v0.10.2 From ec57fcd042a9448096705847290d4e4e97fbb7e1 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 26 Jul 2016 14:48:39 +0000 Subject: usb: phy: omap-otg: Fix missing platform_set_drvdata() in omap_otg_probe() Add missing platform_set_drvdata() in omap_otg_probe(), otherwise calling platform_get_drvdata() in remove returns NULL. This is detected by Coccinelle semantic patch. Signed-off-by: Wei Yongjun Signed-off-by: Felipe Balbi diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 6f6d2a7..6523af4 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c @@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev) (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, otg_dev->vbus); + platform_set_drvdata(pdev, otg_dev); + return 0; } -- cgit v0.10.2 From 4c4f106c032ff32b89c98a4d819e68e6e643c14e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 26 Jul 2016 14:47:00 +0000 Subject: usb: dwc3: fix missing platform_set_drvdata() in dwc3_of_simple_probe() Add missing platform_set_drvdata() in dwc3_of_simple_probe(), otherwise calling platform_get_drvdata() in remove returns NULL. This is detected by Coccinelle semantic patch. 
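This fix (and the similar dwc3-of-simple one that follows) relies on the standard probe/remove pairing: whatever probe() stores with platform_set_drvdata() is exactly what remove() later reads back with platform_get_drvdata(), so skipping the store leaves remove() holding NULL. A condensed kernel-style sketch with a hypothetical my_dev driver (neither of the patched drivers):

	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct my_dev { int id; };

	static int my_probe(struct platform_device *pdev)
	{
		struct my_dev *d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);

		if (!d)
			return -ENOMEM;

		/* without this, my_remove() below would get NULL back */
		platform_set_drvdata(pdev, d);
		return 0;
	}

	static int my_remove(struct platform_device *pdev)
	{
		struct my_dev *d = platform_get_drvdata(pdev);

		/* d is only valid because probe() stored it */
		dev_info(&pdev->dev, "removing device %d\n", d->id);
		return 0;
	}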
Signed-off-by: Wei Yongjun Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 9743353..e56d59b 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) if (!simple->clks) return -ENOMEM; + platform_set_drvdata(pdev, simple); simple->dev = dev; for (i = 0; i < simple->num_clocks; i++) { -- cgit v0.10.2 From 528d28138f91009f230903bd89ccd44719667831 Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Tue, 26 Jul 2016 16:01:30 +0800 Subject: usb: misc: usbtest: usbtest_do_ioctl may return positive integer For case 14 and case 21, their correct return value is the number of bytes transferred, so it is a positive integer. But in usbtest_ioctl, it takes non-zero as false return value for usbtest_do_ioctl, so it will treat the correct test as wrong test, then the time on tests will be the minus value. Signed-off-by: Peter Chen Cc: stable Fixes: 18fc4ebdc705 ("usb: misc: usbtest: Remove timeval usage") Signed-off-by: Felipe Balbi diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 6b978f0..5e3464e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2602,7 +2602,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) ktime_get_ts64(&start); retval = usbtest_do_ioctl(intf, param_32); - if (retval) + if (retval < 0) goto free_mutex; ktime_get_ts64(&end); -- cgit v0.10.2 From 63196e989699ddffb4e3d30ca8c3567d408820f4 Mon Sep 17 00:00:00 2001 From: Binyamin Sharet Date: Thu, 7 Jul 2016 22:22:04 +0300 Subject: usb: gadget: fix check in sync read from ep in gadgetfs When reading synchronously from a non-zero endpoint, gadgetfs will return -EFAULT even if the read succeeds, due to a bad check of the copy_to_iter() return value. This fix compares the return value of copy_to_iter to the amount of bytes that was passed, and only fails if they are not the same. Signed-off-by: Binyamin Sharet Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index aa3707b..8560f2f 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to) } if (is_sync_kiocb(iocb)) { value = ep_io(epdata, buf, len); - if (value >= 0 && copy_to_iter(buf, value, to)) + if (value >= 0 && (copy_to_iter(buf, value, to) != value)) value = -EFAULT; } else { struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); -- cgit v0.10.2 From bd610c5aa9fcc9817d2629274a27aab81aa77cec Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 16 Jul 2016 09:04:40 +0200 Subject: usb: gadget: uvc: Fix return value in case of error If this memory allocation fail, we will return 0, which means success. Return -ENOMEM instead. 
Reviewed-by: Laurent Pinchart Signed-off-by: Christophe JAILLET Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 66753ba..31125a4 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, if (!data) { kfree(*class_array); *class_array = NULL; - ret = PTR_ERR(data); + ret = -ENOMEM; goto unlock; } cl_arr = *class_array; -- cgit v0.10.2 From 3887db5c2b6531c59e9649a4293071b575d6eb3b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 16 Jul 2016 08:34:33 +0200 Subject: usb: gadget: composite: Fix return value in case of error In 'composite_os_desc_req_prepare', if one of the memory allocations fail, 0 will be returned, which means success. We should return -ENOMEM instead. Signed-off-by: Christophe JAILLET Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index a8ecc7a..5ebe6af7 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2126,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); if (!cdev->os_desc_req) { - ret = PTR_ERR(cdev->os_desc_req); + ret = -ENOMEM; goto end; } /* OS feature descriptor length <= 4kB */ cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); if (!cdev->os_desc_req->buf) { - ret = PTR_ERR(cdev->os_desc_req->buf); + ret = -ENOMEM; kfree(cdev->os_desc_req); goto end; } -- cgit v0.10.2 From 327b21da884fe1a29f733e41792ddd53e4a30379 Mon Sep 17 00:00:00 2001 From: Mathieu Laurendeau Date: Fri, 15 Jul 2016 14:58:41 +0200 Subject: usb/gadget: fix gadgetfs aio support. Fix io submissions failing with ENODEV. Signed-off-by: Mathieu Laurendeau Fixes: 7fe3976e0f3a ("gadget: switch ep_io_operations to ->read_iter/->write_iter") Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 8560f2f..16104b5e 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb, */ spin_lock_irq(&epdata->dev->lock); value = -ENODEV; - if (unlikely(epdata->ep)) + if (unlikely(epdata->ep == NULL)) goto fail; req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); -- cgit v0.10.2 From 7442e6db5bdd0dce4615205508301f9b22e502d6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 13 Jul 2016 13:14:33 +0300 Subject: usb: gadget: fsl_qe_udc: off by one in setup_received_handle() The udc->eps[] array has USB_MAX_ENDPOINTS elements so > should be >=. 
Fixes: 3948f0e0c999 ('usb: add Freescale QE/CPM USB peripheral controller driver') Acked-by: Peter Chen Signed-off-by: Dan Carpenter Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 93d28cb..cf8819a 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc, struct qe_ep *ep; if (wValue != 0 || wLength != 0 - || pipe > USB_MAX_ENDPOINTS) + || pipe >= USB_MAX_ENDPOINTS) break; ep = &udc->eps[pipe]; -- cgit v0.10.2 From 17a1dc5e22d25ba374d8c8dd8d5bf10bd20d3265 Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Mon, 11 Jul 2016 09:58:16 +0800 Subject: usb: udc: core: fix error handling The udc device needs to be deleted if error occurs Fixes: 855ed04a3758 ("usb: gadget: udc-core: independent registration of gadgets and gadget drivers") Signed-off-by: Peter Chen Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index ff8685e..934f838 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, if (ret != -EPROBE_DEFER) list_del(&driver->pending); if (ret) - goto err4; + goto err5; break; } } @@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, return 0; +err5: + device_del(&udc->dev); + err4: list_del(&udc->list); mutex_unlock(&udc_lock); -- cgit v0.10.2 From 207707d8fd48ebc977fb2b2794004a020e1ee08e Mon Sep 17 00:00:00 2001 From: Xerox Lin Date: Wed, 29 Jun 2016 14:34:21 +0530 Subject: usb: gadget: rndis: free response queue during REMOTE_NDIS_RESET_MSG When rndis data transfer is in progress, some Windows7 Host PC is not sending the GET_ENCAPSULATED_RESPONSE command for receiving the response for the previous SEND_ENCAPSULATED_COMMAND processed. The rndis function driver appends each response for the SEND_ENCAPSULATED_COMMAND in a queue. As the above process got corrupted, the Host sends a REMOTE_NDIS_RESET_MSG command to do a soft-reset. As the rndis response queue is not freed, the previous response is sent as a part of this REMOTE_NDIS_RESET_MSG's reset response and the Host block any more Rndis transfers. Hence free the rndis response queue as a part of this soft-reset so that the correct response for REMOTE_NDIS_RESET_MSG is sent properly during the response command. 
Signed-off-by: Rajkumar Raghupathy Signed-off-by: Xerox Lin [AmitP: Cherry-picked this patch and folded other relevant fixes from Android common kernel android-4.4] Signed-off-by: Amit Pundir Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 943c21a..ab6ac1b 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params, { rndis_reset_cmplt_type *resp; rndis_resp_t *r; + u8 *xbuf; + u32 length; + + /* drain the response queue */ + while ((xbuf = rndis_get_next_response(params, &length))) + rndis_free_response(params, xbuf); r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); if (!r) -- cgit v0.10.2 From 79d17482a4091056e128a5048e253f2ed53440cc Mon Sep 17 00:00:00 2001 From: Janusz Dziedzic Date: Thu, 7 Jul 2016 08:39:09 +0200 Subject: usb: dwc3: don't set last bit for ISOC endpoints According to Synopsys Databook 2.60a, section 8.3.4, it's stated that: The LST bit should be set to 0 (isochronous transfers normally continue until the endpoint is removed entirely, at which time an End Transfer command is used to stop the transfer). This patch makes sure that detail is observed and fixes a regression with Android Audio playback caused by recent changes to DWC3 gadget. Signed-off-by: Janusz Dziedzic Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index eb820e4..7a4d4d2 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, if (!req->request.no_interrupt && !chain) trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; - if (last) + if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc)) trb->ctrl |= DWC3_TRB_CTRL_LST; if (chain) -- cgit v0.10.2 From a0ad85ae866f8a01f29a18ffa1e9b1aa8ca888bd Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 10 Aug 2016 18:07:46 +0300 Subject: usb: dwc3: gadget: stop processing on HWO set stop consuming TRBs when we reach one with HWO bit already set. This will prevent us from prematurely retiring a TRB. Signed-off-by: Felipe Balbi diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 7a4d4d2..1f5597e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1979,16 +1979,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, trb->ctrl &= ~DWC3_TRB_CTRL_HWO; if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) - /* - * We continue despite the error. There is not much we - * can do. If we don't clean it up we loop forever. If - * we skip the TRB then it gets overwritten after a - * while since we use them in a ring buffer. A BUG() - * would help. Lets hope that if this occurs, someone - * fixes the root cause instead of looking away :) - */ - dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", - dep->name, trb); + return 1; count = trb->size & DWC3_TRB_SIZE_MASK; -- cgit v0.10.2 From 70fcad495b9903d362bdbf9ffba23259294064c8 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 11 Aug 2016 14:39:00 +0100 Subject: ASoC: Fix leak of rtd in soc_bind_dai_link If we fail to find a platform we simply return EPROBE_DEFER, but we have allocated the rtd pointer. All error paths before soc_add_pcm_runtime need to call soc_free_pcm_runtime first to avoid leaking the rtd pointer. 
A suitable error path already exists and is used else where in the function so simply use that here as well. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 16369ca..b0e23db 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1056,7 +1056,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, if (!rtd->platform) { dev_err(card->dev, "ASoC: platform %s not registered\n", dai_link->platform_name); - return -EPROBE_DEFER; + goto _err_defer; } soc_add_pcm_runtime(card, rtd); -- cgit v0.10.2 From fa54aade5338937146bb6b8af93d2c27f6787ae1 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 11 Aug 2016 14:40:04 +0100 Subject: ASoC: wm2000: Fix return of uninitialised varible Anything that sets ret in wm2000_anc_transition will have immediately returned anyway as such we will always return an uninitialised ret at the bottom of the function. Simply replace the return with a return 0; Signed-off-by: Charles Keepax Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a67ea10..f266439 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -581,7 +581,7 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000, if (anc_transitions[i].dest == ANC_OFF) clk_disable_unprepare(wm2000->mclk); - return ret; + return 0; } static int wm2000_anc_set_mode(struct wm2000_priv *wm2000) -- cgit v0.10.2 From 26f2c75cd2cf10a6120ef02ca9a94db77cc9c8e0 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 11 Aug 2016 14:58:24 +0200 Subject: sched/cputime: Fix omitted ticks passed in parameter Commit: f9bcf1e0e014 ("sched/cputime: Fix steal time accounting") ... fixes a leak on steal time accounting but forgets to account the ticks passed in parameters, assuming there is only one to take into account. Let's consider that parameter back. Signed-off-by: Frederic Weisbecker Acked-by: Wanpeng Li Cc: Linus Torvalds Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Radim Cc: Rik van Riel Cc: Thomas Gleixner Cc: Wanpeng Li Cc: linux-tip-commits@vger.kernel.org Link: http://lkml.kernel.org/r/20160811125822.GB4214@lerouge Signed-off-by: Ingo Molnar diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 8b9bcc5..9858266 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -509,12 +509,13 @@ void account_process_tick(struct task_struct *p, int user_tick) void account_idle_ticks(unsigned long ticks) { cputime_t cputime, steal; + if (sched_clock_irqtime) { irqtime_account_idle_ticks(ticks); return; } - cputime = cputime_one_jiffy; + cputime = jiffies_to_cputime(ticks); steal = steal_account_process_time(cputime); if (steal >= cputime) -- cgit v0.10.2 From d52c0569bab4edc888832df44dc7ac28517134f6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 11 Aug 2016 16:08:35 +0200 Subject: x86/apic/x2apic, smp/hotplug: Don't use before alloc in x2apic_cluster_probe() I made a mistake while converting the driver to the hotplug state machine and as a result x2apic_cluster_probe() was accessing cpus_in_cluster before allocating it. This patch fixes it by setting the cpumask after the allocation the memory succeeded. While at it, I marked two functions static which are only used within this file. 
Reported-by: Laura Abbott Signed-off-by: Sebastian Andrzej Siewior Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 6b2c28471de5 ("x86/x2apic: Convert to CPU hotplug state machine") Link: http://lkml.kernel.org/r/1470924515-9444-1-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 6368fa6..54f35d9 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -155,7 +155,7 @@ static void init_x2apic_ldr(void) /* * At CPU state changes, update the x2apic cluster sibling info. */ -int x2apic_prepare_cpu(unsigned int cpu) +static int x2apic_prepare_cpu(unsigned int cpu) { if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL)) return -ENOMEM; @@ -168,7 +168,7 @@ int x2apic_prepare_cpu(unsigned int cpu) return 0; } -int x2apic_dead_cpu(unsigned int this_cpu) +static int x2apic_dead_cpu(unsigned int this_cpu) { int cpu; @@ -186,13 +186,18 @@ int x2apic_dead_cpu(unsigned int this_cpu) static int x2apic_cluster_probe(void) { int cpu = smp_processor_id(); + int ret; if (!x2apic_mode) return 0; + ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE", + x2apic_prepare_cpu, x2apic_dead_cpu); + if (ret < 0) { + pr_err("Failed to register X2APIC_PREPARE\n"); + return 0; + } cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); - cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE", - x2apic_prepare_cpu, x2apic_dead_cpu); return 1; } -- cgit v0.10.2 From c21377f8366c95440d533edbe47d070f662c62ef Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Thu, 11 Aug 2016 09:35:57 -0600 Subject: nvme: Suspend all queues before deletion When nvme_delete_queue fails in the first pass of the nvme_disable_io_queues() loop, we return early, failing to suspend all of the IO queues. Later, on the nvme_pci_disable path, this causes us to disable MSI without actually having freed all the IRQs, which triggers the BUG_ON in free_msi_irqs(), as show below. This patch refactors nvme_disable_io_queues to suspend all queues before start submitting delete queue commands. This way, we ensure that we have at least returned every IRQ before continuing with the removal path. [ 487.529200] kernel BUG at ../drivers/pci/msi.c:368! cpu 0x46: Vector: 700 (Program Check) at [c0000078c5b83650] pc: c000000000627a50: free_msi_irqs+0x90/0x200 lr: c000000000627a40: free_msi_irqs+0x80/0x200 sp: c0000078c5b838d0 msr: 9000000100029033 current = 0xc0000078c5b40000 paca = 0xc000000002bd7600 softe: 0 irq_happened: 0x01 pid = 1376, comm = kworker/70:1H kernel BUG at ../drivers/pci/msi.c:368! Linux version 4.7.0.mainline+ (root@iod76) (gcc version 5.3.1 20160413 (Ubuntu/IBM 5.3.1-14ubuntu2.1) ) #104 SMP Fri Jul 29 09:20:17 CDT 2016 enter ? 
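The general rule the fix restores: the hotplug PREPARE callback is what allocates the per-CPU state, so nothing may touch that state until cpuhp_setup_state() has been registered and has returned successfully. Sketched in isolation (names taken from the patch below):

	ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
				x2apic_prepare_cpu, x2apic_dead_cpu);
	if (ret < 0)
		return 0;	/* fall back; the per-CPU masks were never allocated */

	/* only now has x2apic_prepare_cpu() run for the boot CPU */
	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));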
for help [c0000078c5b83920] d0000000363b0cd8 nvme_dev_disable+0x208/0x4f0 [nvme] [c0000078c5b83a10] d0000000363b12a4 nvme_timeout+0xe4/0x250 [nvme] [c0000078c5b83ad0] c0000000005690e4 blk_mq_rq_timed_out+0x64/0x110 [c0000078c5b83b40] c00000000056c930 bt_for_each+0x160/0x170 [c0000078c5b83bb0] c00000000056d928 blk_mq_queue_tag_busy_iter+0x78/0x110 [c0000078c5b83c00] c0000000005675d8 blk_mq_timeout_work+0xd8/0x1b0 [c0000078c5b83c50] c0000000000e8cf0 process_one_work+0x1e0/0x590 [c0000078c5b83ce0] c0000000000e9148 worker_thread+0xa8/0x660 [c0000078c5b83d80] c0000000000f2090 kthread+0x110/0x130 [c0000078c5b83e30] c0000000000095f0 ret_from_kernel_thread+0x5c/0x6c Signed-off-by: Gabriel Krisman Bertazi Cc: Brian King Cc: Keith Busch Cc: linux-nvme@lists.infradead.org Signed-off-by: Jens Axboe diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d7c33f9..8dcf5a9 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) reinit_completion(&dev->ioq_wait); retry: timeout = ADMIN_TIMEOUT; - for (; i > 0; i--) { - struct nvme_queue *nvmeq = dev->queues[i]; - - if (!pass) - nvme_suspend_queue(nvmeq); - if (nvme_delete_queue(nvmeq, opcode)) + for (; i > 0; i--, sent++) + if (nvme_delete_queue(dev->queues[i], opcode)) break; - ++sent; - } + while (sent--) { timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); if (timeout == 0) @@ -1693,11 +1688,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_stop_queues(&dev->ctrl); csts = readl(dev->bar + NVME_REG_CSTS); } + + for (i = dev->queue_count - 1; i > 0; i--) + nvme_suspend_queue(dev->queues[i]); + if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { - for (i = dev->queue_count - 1; i >= 0; i--) { - struct nvme_queue *nvmeq = dev->queues[i]; - nvme_suspend_queue(nvmeq); - } + nvme_suspend_queue(dev->queues[0]); } else { nvme_disable_io_queues(dev); nvme_disable_admin_queue(dev, shutdown); -- cgit v0.10.2 From 005411ea7ee776a56b1e0120a31c65efdee5cab1 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 9 Aug 2016 14:01:30 -0400 Subject: doc: update block/queue-sysfs.txt entries Add descriptions for dax, io_poll, and write_same_max_bytes files. Signed-off-by: Joe Lawrence Signed-off-by: Jens Axboe diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index d515d58..2a39040 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -14,6 +14,12 @@ add_random (RW) This file allows to turn off the disk entropy contribution. Default value of this file is '1'(on). +dax (RO) +-------- +This file indicates whether the device supports Direct Access (DAX), +used by CPU-addressable storage to bypass the pagecache. It shows '1' +if true, '0' if not. + discard_granularity (RO) ----------------------- This shows the size of internal allocation of the device in bytes, if @@ -46,6 +52,12 @@ hw_sector_size (RO) ------------------- This is the hardware sector size of the device, in bytes. +io_poll (RW) +------------ +When read, this file shows the total number of block IO polls and how +many returned success. Writing '0' to this file will disable polling +for this device. Writing any non-zero value will enable this feature. + iostats (RW) ------------- This file is used to control (on/off) the iostats accounting of the @@ -151,5 +163,11 @@ device state. 
This means that it might not be safe to toggle the setting from "write back" to "write through", since that will also eliminate cache flushes issued by the kernel. +write_same_max_bytes (RO) +------------------------- +This is the number of bytes the device can write in a single write-same +command. A value of '0' means write-same is not supported by this +device. + Jens Axboe , February 2009 -- cgit v0.10.2 From 1ea049b2de5d803374fdbf43add23c8d1c518e7b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 11 Aug 2016 10:15:56 +0200 Subject: bvec: avoid variable shadowing warning Due to the (indirect) nesting of min(..., min(...)), sparse will show a variable shadowing warning whenever bvec.h is included. Avoid that by assigning the inner min() to a temporary variable first. Signed-off-by: Johannes Berg Signed-off-by: Jens Axboe diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 701b64a..89b65b8 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -74,7 +74,8 @@ static inline void bvec_iter_advance(const struct bio_vec *bv, "Attempted to advance past end of bvec iter\n"); while (bytes) { - unsigned len = min(bytes, bvec_iter_len(bv, *iter)); + unsigned iter_len = bvec_iter_len(bv, *iter); + unsigned len = min(bytes, iter_len); bytes -= len; iter->bi_size -= len; -- cgit v0.10.2 From 502aa0a5be633e6558574ebcf63b65afdfbfcd7a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 10 Aug 2016 14:46:27 -0400 Subject: nfsd: fix dentry refcounting on create b44061d0b9 introduced a dentry ref counting bug. Previously we were grabbing one ref to dchild in nfsd_create(), but with the creation of nfsd_create_locked() we have a ref for dchild from the lookup in nfsd_create(), and then another ref in nfsd_create_locked(). The ref from the lookup in nfsd_create() is never dropped and results in dentries still in use at unmount. Signed-off-by: Josef Bacik Fixes: b44061d0b9 "nfsd: reorganize nfsd_create" Reported-by: kernel test robot Reviewed-by: Jeff Layton Acked-by: Al Viro Signed-off-by: J. Bruce Fields diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index ba944123..ff476e6 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1252,10 +1252,13 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, if (IS_ERR(dchild)) return nfserrno(host_err); err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); - if (err) { - dput(dchild); + /* + * We unconditionally drop our ref to dchild as fh_compose will have + * already grabbed its own ref for it. + */ + dput(dchild); + if (err) return err; - } return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type, rdev, resfhp); } -- cgit v0.10.2 From 539587511835ea12d8daa444cbed766cf2bc3612 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 11 Aug 2016 10:31:14 +0800 Subject: usb: misc: usbtest: add fix for driver hang In sg_timeout(), req->status is set to "-ETIMEDOUT" before calling into usb_sg_cancel(). usb_sg_cancel() will do nothing and return directly if req->status has been set to a non-zero value. This will cause driver hang whenever transfer time out is triggered. This patch fixes this issue. It could be backported to stable kernel with version later than v3.15. 
Cc: stable@vger.kernel.org # 3.15+ Cc: Alan Stern Signed-off-by: Lu Baolu Suggested-by: Alan Stern Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 5e3464e..5c8210d 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req) { struct usb_sg_request *req = (struct usb_sg_request *) _req; - req->status = -ETIMEDOUT; usb_sg_cancel(req); } @@ -616,8 +615,10 @@ static int perform_sglist( mod_timer(&sg_timer, jiffies + msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); usb_sg_wait(req); - del_timer_sync(&sg_timer); - retval = req->status; + if (!del_timer_sync(&sg_timer)) + retval = -ETIMEDOUT; + else + retval = req->status; /* FIXME check resulting data pattern */ -- cgit v0.10.2 From ad05711cec12131e1277ce749a99d08ecf233aa7 Mon Sep 17 00:00:00 2001 From: "David A. Long" Date: Wed, 10 Aug 2016 16:44:51 -0400 Subject: arm64: Remove stack duplicating code from jprobes Because the arm64 calling standard allows stacked function arguments to be anywhere in the stack frame, do not attempt to duplicate the stack frame for jprobes handler functions. Documentation changes to describe this issue have been broken out into a separate patch in order to simultaneously address them in other architecture(s). Signed-off-by: David A. Long Acked-by: Masami Hiramatsu Acked-by: Marc Zyngier Signed-off-by: Catalin Marinas diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 61b4915..1737aec 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -22,7 +22,6 @@ #define __ARCH_WANT_KPROBES_INSN_SLOT #define MAX_INSN_SIZE 1 -#define MAX_STACK_SIZE 128 #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 @@ -47,7 +46,6 @@ struct kprobe_ctlblk { struct prev_kprobe prev_kprobe; struct kprobe_step_ctx ss_ctx; struct pt_regs jprobe_saved_regs; - char jprobes_stack[MAX_STACK_SIZE]; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index bf97685..c6b0f40 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); -static inline unsigned long min_stack_size(unsigned long addr) -{ - unsigned long size; - - if (on_irq_stack(addr, raw_smp_processor_id())) - size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr; - else - size = (unsigned long)current_thread_info() + THREAD_START_SP - addr; - - return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack)); -} - static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { /* prepare insn slot */ @@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long stack_ptr = kernel_stack_pointer(regs); kcb->jprobe_saved_regs = *regs; /* - * As Linus pointed out, gcc assumes that the callee - * owns the argument space and could overwrite it, e.g. - * tailcall optimization. So, to be absolutely safe - * we also save and restore enough stack bytes to cover - * the argument area. + * Since we can't be sure where in the stack frame "stacked" + * pass-by-value arguments are stored we just don't try to + * duplicate any of the stack. 
Do not use jprobes on functions that + * use more than 64 bytes (after padding each to an 8 byte boundary) + * of arguments, or pass individual arguments larger than 16 bytes. */ - kasan_disable_current(); - memcpy(kcb->jprobes_stack, (void *)stack_ptr, - min_stack_size(stack_ptr)); - kasan_enable_current(); instruction_pointer_set(regs, (unsigned long) jp->entry); preempt_disable(); @@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) } unpause_graph_tracing(); *regs = kcb->jprobe_saved_regs; - kasan_disable_current(); - memcpy((void *)stack_addr, kcb->jprobes_stack, - min_stack_size(stack_addr)); - kasan_enable_current(); preempt_enable_no_resched(); return 1; } -- cgit v0.10.2 From 104a493390940e85fb7c840a9fd5214aba5cb3bd Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 11 Aug 2016 18:15:56 +0800 Subject: macvtap: fix use after free for skb_array during release We've clean skb_array in macvtap_put_queue() but still try to pop from it during macvtap_sock_destruct(). Fix this use after free by moving the skb array cleanup to macvtap_sock_destruct() instead. Fixes: 362899b8725b ("macvtap: switch to use skb array") Reported-by: Cornelia Huck Tested-by: Cornelia Huck Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a38c0da..070e329 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q) rtnl_unlock(); synchronize_rcu(); - skb_array_cleanup(&q->skb_array); sock_put(&q->sk); } @@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk) static void macvtap_sock_destruct(struct sock *sk) { struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); - struct sk_buff *skb; - while ((skb = skb_array_consume(&q->skb_array)) != NULL) - kfree_skb(skb); + skb_array_cleanup(&q->skb_array); } static int macvtap_open(struct inode *inode, struct file *file) -- cgit v0.10.2 From bbe11fab0b6c1d113776b2898e085bf4d1fdc607 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Thu, 11 Aug 2016 15:24:27 +0200 Subject: macsec: use after free when deleting the underlying device macsec_notify() loops over the list of macsec devices configured on the underlying device when this device is being removed. This list is part of the rx_handler data. However, macsec_dellink unregisters the rx_handler and frees the rx_handler data when the last macsec device is removed from the underlying device. Add macsec_common_dellink() to delete macsec devices without unregistering the rx_handler and freeing the associated data. Fixes: 960d5848dbf1 ("macsec: fix memory leaks around rx_handler (un)registration") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d13e6e1..dbd590a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3047,22 +3047,29 @@ static void macsec_del_dev(struct macsec_dev *macsec) } } +static void macsec_common_dellink(struct net_device *dev, struct list_head *head) +{ + struct macsec_dev *macsec = macsec_priv(dev); + + unregister_netdevice_queue(dev, head); + list_del_rcu(&macsec->secys); + macsec_del_dev(macsec); + + macsec_generation++; +} + static void macsec_dellink(struct net_device *dev, struct list_head *head) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - macsec_generation++; + macsec_common_dellink(dev, head); - unregister_netdevice_queue(dev, head); - list_del_rcu(&macsec->secys); if (list_empty(&rxd->secys)) { netdev_rx_handler_unregister(real_dev); kfree(rxd); } - - macsec_del_dev(macsec); } static int register_macsec_dev(struct net_device *real_dev, @@ -3382,8 +3389,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, rxd = macsec_data_rtnl(real_dev); list_for_each_entry_safe(m, n, &rxd->secys, secys) { - macsec_dellink(m->secy.netdev, &head); + macsec_common_dellink(m->secy.netdev, &head); } + + netdev_rx_handler_unregister(real_dev); + kfree(rxd); + unregister_netdevice_many(&head); break; } -- cgit v0.10.2 From 42691398be08bd1fe99326911a0aa31f2c041d53 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2016 10:37:30 -0400 Subject: nfsd: Fix race between FREE_STATEID and LOCK When running LTP's nfslock01 test, the Linux client can send a LOCK and a FREE_STATEID request at the same time. The outcome is: Frame 324 R OPEN stateid [2,O] Frame 115004 C LOCK lockowner_is_new stateid [2,O] offset 672000 len 64 Frame 115008 R LOCK stateid [1,L] Frame 115012 C WRITE stateid [0,L] offset 672000 len 64 Frame 115016 R WRITE NFS4_OK Frame 115019 C LOCKU stateid [1,L] offset 672000 len 64 Frame 115022 R LOCKU NFS4_OK Frame 115025 C FREE_STATEID stateid [2,L] Frame 115026 C LOCK lockowner_is_new stateid [2,O] offset 672128 len 64 Frame 115029 R FREE_STATEID NFS4_OK Frame 115030 R LOCK stateid [3,L] Frame 115034 C WRITE stateid [0,L] offset 672128 len 64 Frame 115038 R WRITE NFS4ERR_BAD_STATEID In other words, the server returns stateid L in a successful LOCK reply, but it has already released it. Subsequent uses of stateid L fail. To address this, protect the generation check in nfsd4_free_stateid with the st_mutex. This should guarantee that only one of two outcomes occurs: either LOCK returns a fresh valid stateid, or FREE_STATEID returns NFS4ERR_LOCKS_HELD. Reported-by: Alexey Kodanev Fix-suggested-by: Jeff Layton Signed-off-by: Chuck Lever Tested-by: Alexey Kodanev Cc: stable@vger.kernel.org Signed-off-by: J. 
Bruce Fields diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8410ca2..0edc182 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4903,6 +4903,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return nfs_ok; } +static __be32 +nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) +{ + struct nfs4_ol_stateid *stp = openlockstateid(s); + __be32 ret; + + mutex_lock(&stp->st_mutex); + + ret = check_stateid_generation(stateid, &s->sc_stateid, 1); + if (ret) + goto out; + + ret = nfserr_locks_held; + if (check_for_locks(stp->st_stid.sc_file, + lockowner(stp->st_stateowner))) + goto out; + + release_lock_stateid(stp); + ret = nfs_ok; + +out: + mutex_unlock(&stp->st_mutex); + nfs4_put_stid(s); + return ret; +} + __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *free_stateid) @@ -4910,7 +4936,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stateid_t *stateid = &free_stateid->fr_stateid; struct nfs4_stid *s; struct nfs4_delegation *dp; - struct nfs4_ol_stateid *stp; struct nfs4_client *cl = cstate->session->se_client; __be32 ret = nfserr_bad_stateid; @@ -4929,18 +4954,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ret = nfserr_locks_held; break; case NFS4_LOCK_STID: - ret = check_stateid_generation(stateid, &s->sc_stateid, 1); - if (ret) - break; - stp = openlockstateid(s); - ret = nfserr_locks_held; - if (check_for_locks(stp->st_stid.sc_file, - lockowner(stp->st_stateowner))) - break; - WARN_ON(!unhash_lock_stateid(stp)); + atomic_inc(&s->sc_count); spin_unlock(&cl->cl_lock); - nfs4_put_stid(s); - ret = nfs_ok; + ret = nfsd4_free_lock_stateid(stateid, s); goto out; case NFS4_REVOKED_DELEG_STID: dp = delegstateid(s); -- cgit v0.10.2 From c1470b33bb6e18cddd361fef339ef225b8339fe7 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Thu, 11 Aug 2016 15:32:55 -0700 Subject: mm/hugetlb: fix incorrect hugepages count during mem hotplug When memory hotplug operates, free hugepages will be freed if the movable node is offline. Therefore, /proc/sys/vm/nr_hugepages will be incorrect. Fix it by reducing max_huge_pages when the node is offlined. n-horiguchi@ah.jp.nec.com said: : dissolve_free_huge_page intends to break a hugepage into buddy, and the : destination hugepage is supposed to be allocated from the pool of the : destination node, so the system-wide pool size is reduced. So adding : h->max_huge_pages-- makes sense to me. Link: http://lkml.kernel.org/r/1470624546-902-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhong jiang Cc: Mike Kravetz Acked-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b9aa1b0..87e11d8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page) list_del(&page->lru); h->free_huge_pages--; h->free_huge_pages_node[nid]--; + h->max_huge_pages--; update_and_free_page(h, page); } spin_unlock(&hugetlb_lock); -- cgit v0.10.2 From 2f95ff90b99600f53df4a0aa652322d349d67957 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 11 Aug 2016 15:32:57 -0700 Subject: proc, meminfo: use correct helpers for calculating LRU sizes in meminfo meminfo_proc_show() and si_mem_available() are using the wrong helpers for calculating the size of the LRUs. The user-visible impact is that there appears to be an abnormally high number of unevictable pages. 
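Both helpers are named in the hunks that follow; the underlying issue is that LRU counters live in the node-stat item namespace, so feeding an LRU item to the zone-stat accessor reads an unrelated counter. A simplified stand-alone C sketch of that mismatch, with made-up enum values rather than the real vmstat layout:

#include <stdio.h>

/* Simplified stand-ins for the two separate vmstat item namespaces. */
enum zone_stat_item { NR_FREE_PAGES, NR_ZONE_ITEMS };
enum node_stat_item { NR_LRU_BASE, NR_ACTIVE_ANON = NR_LRU_BASE,
                      NR_INACTIVE_ANON, NR_NODE_ITEMS };

static unsigned long zone_stats[NR_ZONE_ITEMS] = { 12345 };    /* free pages, not an LRU size */
static unsigned long node_stats[NR_NODE_ITEMS] = { 100, 200 }; /* the actual LRU sizes */

/* Each accessor is only valid for items from its own enum. */
static unsigned long global_page_state(enum zone_stat_item item)
{
    return zone_stats[item];
}

static unsigned long global_node_page_state(enum node_stat_item item)
{
    return node_stats[item];
}

int main(void)
{
    /* Wrong: an LRU (node) item indexing the zone array hits an unrelated counter. */
    printf("wrong: %lu\n", global_page_state((enum zone_stat_item)NR_ACTIVE_ANON));
    /* Right: the accessor that matches the item's namespace. */
    printf("right: %lu\n", global_node_page_state(NR_ACTIVE_ANON));
    return 0;
}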
Link: http://lkml.kernel.org/r/20160805105805.GR2799@techsingularity.net Signed-off-by: Mel Gorman Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 09e18fd..b9a8c81 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) cached = 0; for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_page_state(NR_LRU_BASE + lru); + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); available = si_mem_available(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ab2c0ff..3fbe73a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4060,7 +4060,7 @@ long si_mem_available(void) int lru; for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_page_state(NR_LRU_BASE + lru); + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); for_each_zone(zone) wmark_low += zone->watermark[WMARK_LOW]; -- cgit v0.10.2 From 1f47b61fb4077936465dcde872a4e5cc4fe708da Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 11 Aug 2016 15:33:00 -0700 Subject: mm: memcontrol: fix swap counter leak on swapout from offline cgroup An offline memory cgroup might have anonymous memory or shmem left charged to it and no swap. Since only swap entries pin the id of an offline cgroup, such a cgroup will have no id and so an attempt to swapout its anon/shmem will not store memory cgroup info in the swap cgroup map. As a result, memcg->swap or memcg->memsw will never get uncharged from it and any of its ascendants. Fix this by always charging swapout to the first ancestor cgroup that hasn't released its id yet. [hannes@cmpxchg.org: add comment to mem_cgroup_swapout] [vdavydov@virtuozzo.com: use WARN_ON_ONCE() in mem_cgroup_id_get_online()] Link: http://lkml.kernel.org/r/20160803123445.GJ13263@esperanza Fixes: 73f576c04b941 ("mm: memcontrol: fix cgroup creation failure after many small jobs") Link: http://lkml.kernel.org/r/5336daa5c9a32e776067773d9da655d2dc126491.1470219853.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: [3.19+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e74d708..791b00c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4082,6 +4082,24 @@ static void mem_cgroup_id_get(struct mem_cgroup *memcg) atomic_inc(&memcg->id.ref); } +static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) +{ + while (!atomic_inc_not_zero(&memcg->id.ref)) { + /* + * The root cgroup cannot be destroyed, so it's refcount must + * always be >= 1. 
+ */ + if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { + VM_BUG_ON(1); + break; + } + memcg = parent_mem_cgroup(memcg); + if (!memcg) + memcg = root_mem_cgroup; + } + return memcg; +} + static void mem_cgroup_id_put(struct mem_cgroup *memcg) { if (atomic_dec_and_test(&memcg->id.ref)) { @@ -5800,7 +5818,7 @@ subsys_initcall(mem_cgroup_init); */ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) { - struct mem_cgroup *memcg; + struct mem_cgroup *memcg, *swap_memcg; unsigned short oldid; VM_BUG_ON_PAGE(PageLRU(page), page); @@ -5815,16 +5833,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!memcg) return; - mem_cgroup_id_get(memcg); - oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); + /* + * In case the memcg owning these pages has been offlined and doesn't + * have an ID allocated to it anymore, charge the closest online + * ancestor for the swap instead and transfer the memory+swap charge. + */ + swap_memcg = mem_cgroup_id_get_online(memcg); + oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); VM_BUG_ON_PAGE(oldid, page); - mem_cgroup_swap_statistics(memcg, true); + mem_cgroup_swap_statistics(swap_memcg, true); page->mem_cgroup = NULL; if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); + if (memcg != swap_memcg) { + if (!mem_cgroup_is_root(swap_memcg)) + page_counter_charge(&swap_memcg->memsw, 1); + page_counter_uncharge(&memcg->memsw, 1); + } + /* * Interrupts should be disabled here because the caller holds the * mapping->tree_lock lock which is taken with interrupts-off. It is @@ -5863,11 +5892,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) if (!memcg) return 0; + memcg = mem_cgroup_id_get_online(memcg); + if (!mem_cgroup_is_root(memcg) && - !page_counter_try_charge(&memcg->swap, 1, &counter)) + !page_counter_try_charge(&memcg->swap, 1, &counter)) { + mem_cgroup_id_put(memcg); return -ENOMEM; + } - mem_cgroup_id_get(memcg); oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); VM_BUG_ON_PAGE(oldid, page); mem_cgroup_swap_statistics(memcg, true); -- cgit v0.10.2 From 615d66c37c755c49ce022c9e5ac0875d27d2603d Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 11 Aug 2016 15:33:03 -0700 Subject: mm: memcontrol: fix memcg id ref counter on swap charge move Since commit 73f576c04b94 ("mm: memcontrol: fix cgroup creation failure after many small jobs") swap entries do not pin memcg->css.refcnt directly. Instead, they pin memcg->id.ref. So we should adjust the reference counters accordingly when moving swap charges between cgroups. 
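The diff that follows turns the single get/put into get_many()/put_many() so that moving N swap entries transfers N references in one step. A rough userspace analogue of that counting pattern using C11 atomics; the struct and names here are invented for illustration and are not the kernel's implementation:

#include <stdatomic.h>
#include <stdio.h>

struct id_ref {
    atomic_uint ref;                    /* analogous to memcg->id.ref */
};

static void id_release(struct id_ref *r)
{
    (void)r;
    printf("last reference dropped, id can be freed\n");
}

static void id_get_many(struct id_ref *r, unsigned int n)
{
    atomic_fetch_add(&r->ref, n);
}

static void id_put_many(struct id_ref *r, unsigned int n)
{
    /* fetch_sub returns the old value; old == n means we just reached zero */
    if (atomic_fetch_sub(&r->ref, n) == n)
        id_release(r);
}

int main(void)
{
    struct id_ref r = { 1 };            /* initial reference */
    unsigned int moved_swap = 5;        /* e.g. five swap entries being moved */

    id_get_many(&r, moved_swap);        /* destination side takes one ref per entry */
    id_put_many(&r, moved_swap);        /* source side drops its per-entry refs */
    id_put_many(&r, 1);                 /* final reference: triggers release */
    return 0;
}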
Fixes: 73f576c04b941 ("mm: memcontrol: fix cgroup creation failure after many small jobs") Link: http://lkml.kernel.org/r/9ce297c64954a42dc90b543bc76106c4a94f07e8.1470219853.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: [3.19+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 791b00c..2ff0289 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4077,9 +4077,9 @@ static struct cftype mem_cgroup_legacy_files[] = { static DEFINE_IDR(mem_cgroup_idr); -static void mem_cgroup_id_get(struct mem_cgroup *memcg) +static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) { - atomic_inc(&memcg->id.ref); + atomic_add(n, &memcg->id.ref); } static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) @@ -4100,9 +4100,9 @@ static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) return memcg; } -static void mem_cgroup_id_put(struct mem_cgroup *memcg) +static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) { - if (atomic_dec_and_test(&memcg->id.ref)) { + if (atomic_sub_and_test(n, &memcg->id.ref)) { idr_remove(&mem_cgroup_idr, memcg->id.id); memcg->id.id = 0; @@ -4111,6 +4111,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg) } } +static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) +{ + mem_cgroup_id_get_many(memcg, 1); +} + +static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) +{ + mem_cgroup_id_put_many(memcg, 1); +} + /** * mem_cgroup_from_id - look up a memcg from a memcg id * @id: the memcg id to look up @@ -4745,6 +4755,8 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.from)) page_counter_uncharge(&mc.from->memsw, mc.moved_swap); + mem_cgroup_id_put_many(mc.from, mc.moved_swap); + /* * we charged both to->memory and to->memsw, so we * should uncharge to->memory. @@ -4752,9 +4764,9 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - css_put_many(&mc.from->css, mc.moved_swap); + mem_cgroup_id_get_many(mc.to, mc.moved_swap); + css_put_many(&mc.to->css, mc.moved_swap); - /* we've already done css_get(mc.to) */ mc.moved_swap = 0; } memcg_oom_recover(from); -- cgit v0.10.2 From bcbf0d566b6e59a6e873bfe415cc415111a819e2 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Thu, 11 Aug 2016 15:33:06 -0700 Subject: kasan: remove the unnecessary WARN_ONCE from quarantine.c It's quite unlikely that the user will so little memory that the per-CPU quarantines won't fit into the given fraction of the available memory. Even in that case he won't be able to do anything with the information given in the warning. 
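The replacement below is just a clamped subtraction; since the sizes involved are unsigned, subtracting the per-CPU share without the comparison would wrap around to a huge value rather than the intended zero. A minimal stand-alone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned long total_share = 100;    /* stand-in for totalram / QUARANTINE_FRACTION */
    unsigned long percpu = 300;         /* stand-in for the per-CPU quarantines */

    /* Naive subtraction wraps around for unsigned types. */
    unsigned long wrapped = total_share - percpu;

    /* Clamped form: quietly fall back to 0 instead of warning. */
    unsigned long clamped = (total_share < percpu) ? 0 : total_share - percpu;

    printf("wrapped = %lu, clamped = %lu\n", wrapped, clamped);
    return 0;
}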
Link: http://lkml.kernel.org/r/1470929182-101413-1-git-send-email-glider@google.com Signed-off-by: Alexander Potapenko Acked-by: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Andrey Konovalov Cc: Christoph Lameter Cc: Joonsoo Kim Cc: Kuthonuzo Luruo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index b6728a3..baabaad 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -217,11 +217,8 @@ void quarantine_reduce(void) new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / QUARANTINE_FRACTION; percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); - if (WARN_ONCE(new_quarantine_size < percpu_quarantines, - "Too little memory, disabling global KASAN quarantine.\n")) - new_quarantine_size = 0; - else - new_quarantine_size -= percpu_quarantines; + new_quarantine_size = (new_quarantine_size < percpu_quarantines) ? + 0 : new_quarantine_size - percpu_quarantines; WRITE_ONCE(quarantine_size, new_quarantine_size); last = global_quarantine.head; -- cgit v0.10.2 From f33e6f0671b3ba81acef4d7c078af86afcc855c4 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 11 Aug 2016 15:33:09 -0700 Subject: mm, oom: fix uninitialized ret in task_will_free_mem() mm/oom_kill.c: In function `task_will_free_mem': mm/oom_kill.c:767: warning: `ret' may be used uninitialized in this function If __task_will_free_mem() is never called inside the for_each_process() loop, ret will not be initialized. Fixes: 1af8bb43269563e4 ("mm, oom: fortify task_will_free_mem()") Link: http://lkml.kernel.org/r/1470255599-24841-1-git-send-email-geert@linux-m68k.org Signed-off-by: Geert Uytterhoeven Acked-by: Tetsuo Handa Acked-by: Michal Hocko Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7d0a275..d53a9aa 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task) { struct mm_struct *mm = task->mm; struct task_struct *p; - bool ret; + bool ret = true; /* * Skip tasks without mm because it might have passed its exit_mm and -- cgit v0.10.2 From 5830169f47269f78f6624bd70165eb571270da82 Mon Sep 17 00:00:00 2001 From: Reza Arbab Date: Thu, 11 Aug 2016 15:33:12 -0700 Subject: mm/memory_hotplug.c: initialize per_cpu_nodestats for hotadded pgdats The following oops occurs after a pgdat is hotadded: Unable to handle kernel paging request for data at address 0x00c30001 Faulting instruction address: 0xc00000000022f8f4 Oops: Kernel access of bad area, sig: 11 [#1] SMP NR_CPUS=2048 NUMA pSeries Modules linked in: ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 ipt_REJECT nf_reject_ipv4 xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw iptable_filter nls_utf8 isofs sg virtio_balloon uio_pdrv_genirq uio ip_tables xfs libcrc32c sr_mod cdrom sd_mod virtio_net ibmvscsi scsi_transport_srp virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 4.8.0-rc1-device #110 task: c000000000ef3080 task.stack: c000000000f6c000 NIP: c00000000022f8f4 LR: c00000000022f948 CTR: 0000000000000000 REGS: c000000000f6fa50 TRAP: 0300 Tainted: G W (4.8.0-rc1-device) MSR: 800000010280b033 CR: 84002028 XER: 20000000 CFAR: 
d000000001d2013c DAR: 0000000000c30001 DSISR: 40000000 SOFTE: 0 NIP refresh_cpu_vm_stats+0x1a4/0x2f0 LR refresh_cpu_vm_stats+0x1f8/0x2f0 Call Trace: refresh_cpu_vm_stats+0x1f8/0x2f0 (unreliable) Add per_cpu_nodestats initialization to the hotplug codepath. Link: http://lkml.kernel.org/r/1470931473-7090-1-git-send-email-arbab@linux.vnet.ibm.com Signed-off-by: Reza Arbab Cc: Mel Gorman Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3894b65..41266dc 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) /* init node's zones as empty zones, we don't have any present pages.*/ free_area_init_node(nid, zones_size, start_pfn, zholes_size); + pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); /* * The node we allocated has no zone fallback lists. For avoiding @@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) static void rollback_node_hotadd(int nid, pg_data_t *pgdat) { arch_refresh_nodedata(nid, NULL); + free_percpu(pgdat->per_cpu_nodestats); arch_free_nodedata(pgdat); return; } -- cgit v0.10.2 From aa8b187eeab5e6757e959b81889ddbfda7d02ad1 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 9 Aug 2016 22:09:19 -0700 Subject: hwmon: (it87) Features mask must be 32 bit wide Coverity reports: result_independent_of_operands: data->features & (65536UL /* 1UL << 16 */) is always 0 regardless of the values of its operands. This occurs as the logical operand of if. data->features needs to be 32 bit wide since there are more than 16 features. Fixes: cc18da79d9b7 ("hwmon: (it87) Support up to 6 temperature sensors ... "); Signed-off-by: Guenter Roeck diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 730d840..d0203a1 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -491,7 +491,7 @@ struct it87_sio_data { struct it87_data { const struct attribute_group *groups[7]; enum chips type; - u16 features; + u32 features; u8 peci_mask; u8 old_peci_mask; -- cgit v0.10.2 From 68187872c76a96ed4db7bfb064272591f02e208b Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 11 Aug 2016 17:45:21 +0200 Subject: uprobes/x86: Fix RIP-relative handling of EVEX-encoded instructions Since instruction decoder now supports EVEX-encoded instructions, two fixes are needed to correctly handle them in uprobes. Extended bits for MODRM.rm field need to be sanitized just like we do it for VEX3, to avoid encoding wrong register for register-relative access. EVEX has _two_ extended bits: b and x. Theoretically, EVEX.x should be ignored by the CPU (since GPRs go only up to 15, not 31), but let's be paranoid here: proper encoding for register-relative access should have EVEX.x = 1. Secondly, we should fetch vex.vvvv for EVEX too. This is now super easy because instruction decoder populates vex_prefix.bytes[2] for all flavors of (e)vex encodings, even for VEX2. Signed-off-by: Denys Vlasenko Acked-by: Masami Hiramatsu Acked-by: Srikar Dronamraju Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Brian Gerst Cc: H. 
Peter Anvin Cc: Jim Keniston Cc: Jiri Olsa Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: linux-kernel@vger.kernel.org Cc: # v4.1+ Fixes: 8a764a875fe3 ("x86/asm/decoder: Create artificial 3rd byte for 2-byte VEX") Link: http://lkml.kernel.org/r/20160811154521.20469-1-dvlasenk@redhat.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 6c1ff31..495c776 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) *cursor &= 0xfe; } /* - * Similar treatment for VEX3 prefix. - * TODO: add XOP/EVEX treatment when insn decoder supports them + * Similar treatment for VEX3/EVEX prefix. + * TODO: add XOP treatment when insn decoder supports them */ - if (insn->vex_prefix.nbytes == 3) { + if (insn->vex_prefix.nbytes >= 3) { /* * vex2: c5 rvvvvLpp (has no b bit) * vex3/xop: c4/8f rxbmmmmm wvvvvLpp * evex: 62 rxbR00mm wvvvv1pp zllBVaaa - * (evex will need setting of both b and x since - * in non-sib encoding evex.x is 4th bit of MODRM.rm) - * Setting VEX3.b (setting because it has inverted meaning): + * Setting VEX3.b (setting because it has inverted meaning). + * Setting EVEX.x since (in non-SIB encoding) EVEX.x + * is the 4th bit of MODRM.rm, and needs the same treatment. + * For VEX3-encoded insns, VEX3.x value has no effect in + * non-SIB encoding, the change is superfluous but harmless. */ cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; - *cursor |= 0x20; + *cursor |= 0x60; } /* @@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) reg = MODRM_REG(insn); /* Fetch modrm.reg */ reg2 = 0xff; /* Fetch vex.vvvv */ - if (insn->vex_prefix.nbytes == 2) - reg2 = insn->vex_prefix.bytes[1]; - else if (insn->vex_prefix.nbytes == 3) + if (insn->vex_prefix.nbytes) reg2 = insn->vex_prefix.bytes[2]; /* - * TODO: add XOP, EXEV vvvv reading. + * TODO: add XOP vvvv reading. * * vex.vvvv field is in bits 6-3, bits are inverted. * But in 32-bit mode, high-order bit may be ignored. -- cgit v0.10.2 From 10e9e7bd598f9a66a11a22514c68c13c41fc821b Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 11 Aug 2016 07:30:20 -0700 Subject: perf/x86/intel/uncore: Fix uncore num_counters Some uncore boxes' num_counters value for Haswell server and Broadwell server are not correct (too large, off by one). This issue was found by comparing the code with the document. Although there is no bug report from users yet, accessing non-existent counters is dangerous and the behavior is undefined: it may cause miscounting or even crashes. This patch makes them consistent with the uncore document. 
Reported-by: Lukasz Odzioba Signed-off-by: Kan Liang Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: Link: http://lkml.kernel.org/r/1470925820-59847-1-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 824e540..8aee83b 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -2626,7 +2626,7 @@ void hswep_uncore_cpu_init(void) static struct intel_uncore_type hswep_uncore_ha = { .name = "ha", - .num_counters = 5, + .num_counters = 4, .num_boxes = 2, .perf_ctr_bits = 48, SNBEP_UNCORE_PCI_COMMON_INIT(), @@ -2645,7 +2645,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = { static struct intel_uncore_type hswep_uncore_imc = { .name = "imc", - .num_counters = 5, + .num_counters = 4, .num_boxes = 8, .perf_ctr_bits = 48, .fixed_ctr_bits = 48, @@ -2691,7 +2691,7 @@ static struct intel_uncore_type hswep_uncore_irp = { static struct intel_uncore_type hswep_uncore_qpi = { .name = "qpi", - .num_counters = 5, + .num_counters = 4, .num_boxes = 3, .perf_ctr_bits = 48, .perf_ctr = SNBEP_PCI_PMON_CTR0, @@ -2773,7 +2773,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = { static struct intel_uncore_type hswep_uncore_r3qpi = { .name = "r3qpi", - .num_counters = 4, + .num_counters = 3, .num_boxes = 3, .perf_ctr_bits = 44, .constraints = hswep_uncore_r3qpi_constraints, @@ -2972,7 +2972,7 @@ static struct intel_uncore_type bdx_uncore_ha = { static struct intel_uncore_type bdx_uncore_imc = { .name = "imc", - .num_counters = 5, + .num_counters = 4, .num_boxes = 8, .perf_ctr_bits = 48, .fixed_ctr_bits = 48, -- cgit v0.10.2 From 95f3be798472f63b495ca4712af005ea5ac7aa47 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 11 Aug 2016 07:31:14 -0700 Subject: perf/x86/intel/uncore: Add enable_box for client MSR uncore There are bug reports about miscounting uncore counters on some client machines like Sandybridge, Broadwell and Skylake. It is very likely to be observed on idle systems. This issue is caused by a hardware issue. PERF_GLOBAL_CTL could be cleared after Package C7, and nothing will be count. The related errata (HSD 158) could be found in: www.intel.com/content/dam/www/public/us/en/documents/specification-updates/4th-gen-core-family-desktop-specification-update.pdf This patch tries to work around this issue by re-enabling PERF_GLOBAL_CTL in ->enable_box(). The workaround does not cover all cases. It helps for new events after returning from C7. But it cannot prevent C7, it will still miscount if a counter is already active. There is no drawback in leaving it enabled, so it does not need disable_box() here. 
Signed-off-by: Kan Liang Cc: Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/1470925874-59943-1-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 97a69db..9d35ec0 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -100,6 +100,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box) } } +static void snb_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, + SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); +} + static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) @@ -127,6 +133,7 @@ static struct attribute_group snb_uncore_format_group = { static struct intel_uncore_ops snb_uncore_msr_ops = { .init_box = snb_uncore_msr_init_box, + .enable_box = snb_uncore_msr_enable_box, .exit_box = snb_uncore_msr_exit_box, .disable_event = snb_uncore_msr_disable_event, .enable_event = snb_uncore_msr_enable_event, @@ -192,6 +199,12 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box) } } +static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, + SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); +} + static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) @@ -200,6 +213,7 @@ static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) static struct intel_uncore_ops skl_uncore_msr_ops = { .init_box = skl_uncore_msr_init_box, + .enable_box = skl_uncore_msr_enable_box, .exit_box = skl_uncore_msr_exit_box, .disable_event = snb_uncore_msr_disable_event, .enable_event = snb_uncore_msr_enable_event, -- cgit v0.10.2 From 75a4615c95a2de4fa592c1312fb8446e74b4e5eb Mon Sep 17 00:00:00 2001 From: Julius Niedworok Date: Wed, 3 Aug 2016 16:39:54 +0200 Subject: KVM: s390: set the prefix initially properly When KVM_RUN is triggered on a VCPU without an initial reset, a validity intercept occurs. Setting the prefix will set the KVM_REQ_MMU_RELOAD bit initially, thus preventing the bug. Reviewed-by: David Hildenbrand Acked-by: Cornelia Huck Signed-off-by: Julius Niedworok Signed-off-by: Christian Borntraeger diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3f3ae48..e63f6ed 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1672,6 +1672,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) KVM_SYNC_CRS | KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT; + kvm_s390_set_prefix(vcpu, 0); if (test_kvm_facility(vcpu->kvm, 64)) vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; /* fprs can be synchronized via vrs, even if the guest has no vx. With -- cgit v0.10.2 From aca411a4b17a4aebe14ecdf253373db5b7ee6058 Mon Sep 17 00:00:00 2001 From: Julius Niedworok Date: Wed, 3 Aug 2016 16:39:55 +0200 Subject: KVM: s390: reset KVM_REQ_MMU_RELOAD if mapping the prefix failed When triggering KVM_RUN without a user memory region being mapped (KVM_SET_USER_MEMORY_REGION) a validity intercept occurs. This could happen, if the user memory region was not mapped initially or if it was unmapped after the vcpu is initialized. The function kvm_s390_handle_requests checks for the KVM_REQ_MMU_RELOAD bit. The check function always clears this bit. 
If gmap_mprotect_notify returns an error code, the mapping failed, but the KVM_REQ_MMU_RELOAD was not set anymore. So the next time kvm_s390_handle_requests is called, the execution would fall trough the check for KVM_REQ_MMU_RELOAD. The bit needs to be resetted, if gmap_mprotect_notify returns an error code. Resetting the bit with kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu) fixes the bug. Reviewed-by: David Hildenbrand Signed-off-by: Julius Niedworok Signed-off-by: Christian Borntraeger diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index e63f6ed..f142215 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2362,8 +2362,10 @@ retry: rc = gmap_mprotect_notify(vcpu->arch.gmap, kvm_s390_get_prefix(vcpu), PAGE_SIZE * 2, PROT_WRITE); - if (rc) + if (rc) { + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); return rc; + } goto retry; } -- cgit v0.10.2 From 023e9fddc3616b005c3753fc1bb6526388cd7a30 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 9 Aug 2016 19:13:00 +0200 Subject: KVM: PPC: Move xics_debugfs_init out of create MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we are about to hold the kvm->lock during the create operation on KVM devices, we should move the call to xics_debugfs_init into its own function, since holding a mutex over extended amounts of time might not be a good idea. Introduce an init operation on the kvm_device_ops struct which cannot fail and call this, if configured, after the device has been created. Signed-off-by: Christoffer Dall Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index a75ba38..f2def8e 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -1341,8 +1341,6 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) return ret; } - xics_debugfs_init(xics); - #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE if (cpu_has_feature(CPU_FTR_ARCH_206)) { /* Enable real mode support */ @@ -1354,9 +1352,17 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) return 0; } +static void kvmppc_xics_init(struct kvm_device *dev) +{ + struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private; + + xics_debugfs_init(xics); +} + struct kvm_device_ops kvm_xics_ops = { .name = "kvm-xics", .create = kvmppc_xics_create, + .init = kvmppc_xics_init, .destroy = kvmppc_xics_free, .set_attr = xics_set_attr, .get_attr = xics_get_attr, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 01e908a..d3c9b82 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1116,6 +1116,12 @@ struct kvm_device_ops { int (*create)(struct kvm_device *dev, u32 type); /* + * init is called after create if create is successful and is called + * outside of holding kvm->lock. + */ + void (*init)(struct kvm_device *dev); + + /* * Destroy is responsible for freeing dev. 
* * Destroy may be called before or after destructors are called diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cc081cc..ae64245 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2838,6 +2838,9 @@ static int kvm_ioctl_create_device(struct kvm *kvm, return ret; } + if (ops->init) + ops->init(dev); + ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { ops->destroy(dev); -- cgit v0.10.2 From a28ebea2adc4a2bef5989a5a181ec238f59fbcad Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 9 Aug 2016 19:13:01 +0200 Subject: KVM: Protect device ops->create and list_add with kvm->lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KVM devices were manipulating list data structures without any form of synchronization, and some implementations of the create operations also suffered from a lack of synchronization. Now when we've split the xics create operation into create and init, we can hold the kvm->lock mutex while calling the create operation and when manipulating the devices list. The error path in the generic code gets slightly ugly because we have to take the mutex again and delete the device from the list, but holding the mutex during anon_inode_getfd or releasing/locking the mutex in the common non-error path seemed wrong. Signed-off-by: Christoffer Dall Reviewed-by: Paolo Bonzini Acked-by: Christian Borntraeger Signed-off-by: Radim Krčmář diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index d94bb90..75f130e 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1009,9 +1009,13 @@ long kvm_arch_vm_ioctl(struct file *filp, switch (ioctl) { case KVM_CREATE_IRQCHIP: { + int ret; if (!vgic_present) return -ENXIO; - return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); + mutex_lock(&kvm->lock); + ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); + mutex_unlock(&kvm->lock); + return ret; } case KVM_ARM_SET_DEVICE_ADDR: { struct kvm_arm_device_addr dev_addr; diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index f2def8e..05aa113 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -1329,12 +1329,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) xics->kvm = kvm; /* Already there ? */ - mutex_lock(&kvm->lock); if (kvm->arch.xics) ret = -EEXIST; else kvm->arch.xics = xics; - mutex_unlock(&kvm->lock); if (ret) { kfree(xics); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d3c9b82..9c28b4d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1113,6 +1113,12 @@ struct kvm_device { /* create, destroy, and name are mandatory */ struct kvm_device_ops { const char *name; + + /* + * create is called holding kvm->lock and any operations not suitable + * to do while holding the lock should be deferred to init (see + * below). 
+ */ int (*create)(struct kvm_device *dev, u32 type); /* diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index fb4b0a7..83777c1 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -73,12 +73,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) int i, vcpu_lock_idx = -1, ret; struct kvm_vcpu *vcpu; - mutex_lock(&kvm->lock); - - if (irqchip_in_kernel(kvm)) { - ret = -EEXIST; - goto out; - } + if (irqchip_in_kernel(kvm)) + return -EEXIST; /* * This function is also called by the KVM_CREATE_IRQCHIP handler, @@ -87,10 +83,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) * the proper checks already. */ if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && - !kvm_vgic_global_state.can_emulate_gicv2) { - ret = -ENODEV; - goto out; - } + !kvm_vgic_global_state.can_emulate_gicv2) + return -ENODEV; /* * Any time a vcpu is run, vcpu_load is called which tries to grab the @@ -138,9 +132,6 @@ out_unlock: vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); mutex_unlock(&vcpu->mutex); } - -out: - mutex_unlock(&kvm->lock); return ret; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ae64245..1950782 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -696,6 +696,11 @@ static void kvm_destroy_devices(struct kvm *kvm) { struct kvm_device *dev, *tmp; + /* + * We do not need to take the kvm->lock here, because nobody else + * has a reference to the struct kvm at this point and therefore + * cannot access the devices list anyhow. + */ list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { list_del(&dev->vm_node); dev->ops->destroy(dev); @@ -2832,11 +2837,15 @@ static int kvm_ioctl_create_device(struct kvm *kvm, dev->ops = ops; dev->kvm = kvm; + mutex_lock(&kvm->lock); ret = ops->create(dev, cd->type); if (ret < 0) { + mutex_unlock(&kvm->lock); kfree(dev); return ret; } + list_add(&dev->vm_node, &kvm->devices); + mutex_unlock(&kvm->lock); if (ops->init) ops->init(dev); @@ -2844,10 +2853,12 @@ static int kvm_ioctl_create_device(struct kvm *kvm, ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { ops->destroy(dev); + mutex_lock(&kvm->lock); + list_del(&dev->vm_node); + mutex_unlock(&kvm->lock); return ret; } - list_add(&dev->vm_node, &kvm->devices); kvm_get_kvm(kvm); cd->fd = ret; return 0; -- cgit v0.10.2 From c604cffa93478f8888bec62b23d6073dad03d43a Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 11 Aug 2016 11:58:12 +0100 Subject: MIPS: KVM: Fix mapped fault broken commpage handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kvm_mips_handle_mapped_seg_tlb_fault() appears to map the guest page at virtual address 0 to PFN 0 if the guest has created its own mapping there. The intention is unclear, but it may have been an attempt to protect the zero page from being mapped to anything but the comm page in code paths you wouldn't expect from genuine commpage accesses (guest kernel mode cache instructions on that address, hitting trapping instructions when executing from that address with a coincidental TLB eviction during the KVM handling, and guest user mode accesses to that address). Fix this to check for mappings exactly at KVM_GUEST_COMMPAGE_ADDR (it may not be at address 0 since commit 42aa12e74e91 ("MIPS: KVM: Move commpage so 0x0 is unmapped")), and set the corresponding EntryLo to be interpreted as 0 (invalid). 
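The check added in the diff below asks whether a guest TLB entry's page pair covers the commpage and, if so, zeroes only the EntryLo half for that page (a MIPS TLB entry maps an even/odd pair of pages, selected by bit PAGE_SHIFT of the virtual address). A stand-alone sketch of the address arithmetic, using illustrative constants rather than the real MIPS/KVM definitions:

#include <stdio.h>

/* Illustrative values only, not the real MIPS/KVM constants. */
#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define VPN2_MASK   0xffffe000UL        /* VA bits above the even/odd page pair */

/* Does a TLB entry with this EntryHi cover the page pair containing addr? */
static int covers_page_pair(unsigned long entryhi, unsigned long addr)
{
    return !((entryhi ^ addr) & VPN2_MASK & (PAGE_MASK << 1));
}

int main(void)
{
    unsigned long commpage = 0x0UL;                       /* hypothetical commpage VA */
    unsigned long entryhi = commpage & (PAGE_MASK << 1);  /* guest entry for that pair */
    unsigned long tlb_lo[2] = { 0x11, 0x22 };             /* fake EntryLo0/EntryLo1 */

    if (covers_page_pair(entryhi, commpage)) {
        /* Bit PAGE_SHIFT of the VA picks the even (0) or odd (1) half. */
        tlb_lo[(commpage >> PAGE_SHIFT) & 1] = 0;         /* leave the commpage unmapped */
    }
    printf("lo0=%#lx lo1=%#lx\n", tlb_lo[0], tlb_lo[1]);
    return 0;
}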
Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.") Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Cc: # 3.10.x- Signed-off-by: Radim Krčmář diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 57319ee..96e0b24 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -138,35 +138,42 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; struct kvm *kvm = vcpu->kvm; kvm_pfn_t pfn0, pfn1; + long tlb_lo[2]; int ret; - if ((tlb->tlb_hi & VPN2_MASK) == 0) { - pfn0 = 0; - pfn1 = 0; - } else { - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) - >> PAGE_SHIFT) < 0) - return -1; - - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) - >> PAGE_SHIFT) < 0) - return -1; - - pfn0 = kvm->arch.guest_pmap[ - mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT]; - pfn1 = kvm->arch.guest_pmap[ - mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT]; - } + tlb_lo[0] = tlb->tlb_lo[0]; + tlb_lo[1] = tlb->tlb_lo[1]; + + /* + * The commpage address must not be mapped to anything else if the guest + * TLB contains entries nearby, or commpage accesses will break. + */ + if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & + VPN2_MASK & (PAGE_MASK << 1))) + tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; + + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0]) + >> PAGE_SHIFT) < 0) + return -1; + + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1]) + >> PAGE_SHIFT) < 0) + return -1; + + pfn0 = kvm->arch.guest_pmap[ + mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT]; + pfn1 = kvm->arch.guest_pmap[ + mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT]; /* Get attributes from the Guest TLB */ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | - (tlb->tlb_lo[0] & ENTRYLO_D) | - (tlb->tlb_lo[0] & ENTRYLO_V); + (tlb_lo[0] & ENTRYLO_D) | + (tlb_lo[0] & ENTRYLO_V); entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | - (tlb->tlb_lo[1] & ENTRYLO_D) | - (tlb->tlb_lo[1] & ENTRYLO_V); + (tlb_lo[1] & ENTRYLO_D) | + (tlb_lo[1] & ENTRYLO_V); kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, tlb->tlb_lo[0], tlb->tlb_lo[1]); -- cgit v0.10.2 From 8985d50382359e5bf118fdbefc859d0dbf6cebc7 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 11 Aug 2016 11:58:13 +0100 Subject: MIPS: KVM: Add missing gfn range check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kvm_mips_handle_mapped_seg_tlb_fault() calculates the guest frame number based on the guest TLB EntryLo values, however it is not range checked to ensure it lies within the guest_pmap. If the physical memory the guest refers to is out of range then dump the guest TLB and emit an internal error. 
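The fix is a bounds check on both guest frame numbers of the entry's page pair before they are used to index guest_pmap. A trimmed-down sketch of the same idea; the array size and helper name are invented for illustration:

#include <stdio.h>

#define GUEST_PMAP_NPAGES 8             /* stand-in for kvm->arch.guest_pmap_npages */

static unsigned long guest_pmap[GUEST_PMAP_NPAGES];

/* Validate both halves of the page pair before touching the map. */
static int lookup_pfn_pair(unsigned long gfn0, unsigned long gfn1,
                           unsigned long *pfn0, unsigned long *pfn1)
{
    if (gfn0 >= GUEST_PMAP_NPAGES || gfn1 >= GUEST_PMAP_NPAGES) {
        fprintf(stderr, "invalid gfn pair: %lu, %lu\n", gfn0, gfn1);
        return -1;                      /* report instead of reading out of bounds */
    }
    *pfn0 = guest_pmap[gfn0];
    *pfn1 = guest_pmap[gfn1];
    return 0;
}

int main(void)
{
    unsigned long pfn0, pfn1;

    if (lookup_pfn_pair(3, 4, &pfn0, &pfn1) == 0)
        printf("in range: ok\n");
    lookup_pfn_pair(3, 99, &pfn0, &pfn1);   /* second gfn out of range, rejected */
    return 0;
}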
Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.") Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Cc: # 3.10.x- Signed-off-by: Radim Krčmář diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 96e0b24..701c1b6 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -138,6 +138,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; struct kvm *kvm = vcpu->kvm; kvm_pfn_t pfn0, pfn1; + gfn_t gfn0, gfn1; long tlb_lo[2]; int ret; @@ -152,18 +153,24 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, VPN2_MASK & (PAGE_MASK << 1))) tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0]) - >> PAGE_SHIFT) < 0) + gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; + gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; + if (gfn0 >= kvm->arch.guest_pmap_npages || + gfn1 >= kvm->arch.guest_pmap_npages) { + kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", + __func__, gfn0, gfn1, tlb->tlb_hi); + kvm_mips_dump_guest_tlbs(vcpu); + return -1; + } + + if (kvm_mips_map_page(kvm, gfn0) < 0) return -1; - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1]) - >> PAGE_SHIFT) < 0) + if (kvm_mips_map_page(kvm, gfn1) < 0) return -1; - pfn0 = kvm->arch.guest_pmap[ - mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT]; - pfn1 = kvm->arch.guest_pmap[ - mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT]; + pfn0 = kvm->arch.guest_pmap[gfn0]; + pfn1 = kvm->arch.guest_pmap[gfn1]; /* Get attributes from the Guest TLB */ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | -- cgit v0.10.2 From 0741f52d1b980dbeb290afe67d88fc2928edd8ab Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 11 Aug 2016 11:58:14 +0100 Subject: MIPS: KVM: Fix gfn range check in kseg0 tlb faults MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two consecutive gfns are loaded into host TLB, so ensure the range check isn't off by one if guest_pmap_npages is odd. Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.") Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Cc: # 3.10.x- Signed-off-by: Radim Krčmář diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 701c1b6..6a8a218 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, } gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); - if (gfn >= kvm->arch.guest_pmap_npages) { + if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, gfn, badvaddr); kvm_mips_dump_host_tlbs(); -- cgit v0.10.2 From 9b731bcfdec4c159ad2e4312e25d69221709b96a Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 11 Aug 2016 11:58:15 +0100 Subject: MIPS: KVM: Propagate kseg0/mapped tlb fault errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Propagate errors from kvm_mips_handle_kseg0_tlb_fault() and kvm_mips_handle_mapped_seg_tlb_fault(), usually triggering an internal error since they normally indicate the guest accessed bad physical memory or the commpage in an unexpected way. 
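The diffs below are straightforward error propagation: the TLB-fill helpers can now fail, and each caller checks the return value and turns a failure into EMULATE_FAIL rather than continuing as if the fill had succeeded. A self-contained sketch of that shape; the helper here is hypothetical, not the real kvm_mips_* call:

#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

/* Hypothetical stand-in for the TLB-fill helper, which may now fail. */
static int fill_shadow_tlb(unsigned long va)
{
    return va ? 0 : -1;                 /* pretend address 0 cannot be filled */
}

static enum emulation_result handle_fault(unsigned long va)
{
    /* Before the fix the return value was ignored and emulation continued. */
    if (fill_shadow_tlb(va)) {
        fprintf(stderr, "tlb fill failed for %#lx\n", va);
        return EMULATE_FAIL;            /* surface an internal error instead */
    }
    return EMULATE_DONE;
}

int main(void)
{
    printf("ok path:   %d\n", handle_fault(0x1000));
    printf("fail path: %d\n", handle_fault(0x0));
    return 0;
}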
Fixes: 858dd5d45733 ("KVM/MIPS32: MMU/TLB operations for the Guest.") Fixes: e685c689f3a8 ("KVM/MIPS32: Privileged instruction/target branch emulation.") Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Cc: # 3.10.x- Signed-off-by: Radim Krčmář diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 6eb52b9..e788515 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1642,8 +1642,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, preempt_disable(); if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) - kvm_mips_handle_kseg0_tlb_fault(va, vcpu); + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && + kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { + kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", + __func__, va, vcpu, read_c0_entryhi()); + er = EMULATE_FAIL; + preempt_enable(); + goto done; + } } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { int index; @@ -1680,12 +1686,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, run, vcpu); preempt_enable(); goto dont_update_pc; - } else { - /* - * We fault an entry from the guest tlb to the - * shadow host TLB - */ - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); + } + /* + * We fault an entry from the guest tlb to the + * shadow host TLB + */ + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) { + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, va, index, vcpu, + read_c0_entryhi()); + er = EMULATE_FAIL; + preempt_enable(); + goto done; } } } else { @@ -2659,7 +2671,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, * OK we have a Guest TLB entry, now inject it into the * shadow host TLB */ - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) { + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, va, index, vcpu, + read_c0_entryhi()); + er = EMULATE_FAIL; + } } } diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 6a8a218..6cfdcf5 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -368,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu) local_irq_restore(flags); return KVM_INVALID_INST; } - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, - &vcpu->arch. - guest_tlb[index]); + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, + &vcpu->arch.guest_tlb[index])) { + kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, opc, index, vcpu, + read_c0_entryhi()); + kvm_mips_dump_guest_tlbs(vcpu); + local_irq_restore(flags); + return KVM_INVALID_INST; + } inst = *(opc); } local_irq_restore(flags); -- cgit v0.10.2 From 979cf59acc9d634cc140aadd0d2915947ab303cc Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 12 Aug 2016 11:45:18 +0000 Subject: ASoC: Intel: Skylake: Fix error return code in skl_probe() Fix to return error code -ENODEV from the error handling case instead of 0, as done elsewhere in this function. 
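This is a common probe-path bug: err starts at 0, so an error branch that jumps to the cleanup label without assigning err makes the function report success even though initialization failed. A small stand-alone sketch of the corrected shape; the resource helpers are invented for illustration:

#include <errno.h>
#include <stdio.h>

/* Hypothetical resource helpers, for illustration only. */
static void *acquire_table(void)
{
    return NULL;                        /* simulate the failure path */
}

static void release_resources(void)
{
}

static int probe(void)
{
    int err = 0;
    void *table;

    table = acquire_table();
    if (table == NULL) {
        err = -ENODEV;                  /* without this, the caller would see 0 */
        goto out_free;
    }

    /* ... further setup would go here ... */
    return 0;

out_free:
    release_resources();
    return err;
}

int main(void)
{
    printf("probe() = %d\n", probe());
    return 0;
}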
Fixes: 87b2bdf02278 ("ASoC: Intel: Skylake: Initialize NHLT table") Signed-off-by: Wei Yongjun Acked-By: Vinod Koul Signed-off-by: Mark Brown diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index cd59536..e3e7641 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -672,8 +672,10 @@ static int skl_probe(struct pci_dev *pci, skl->nhlt = skl_nhlt_init(bus->dev); - if (skl->nhlt == NULL) + if (skl->nhlt == NULL) { + err = -ENODEV; goto out_free; + } skl_nhlt_update_topology_bin(skl); -- cgit v0.10.2 From 4a008c002133aba0276cbc5d116bbb774aaf1b0d Mon Sep 17 00:00:00 2001 From: Thilo Cestonaro Date: Fri, 12 Aug 2016 14:39:10 +0200 Subject: hwmon: (ftsteutates) Correct ftp urls in driver documentation Signed-off-by: Thilo Cestonaro Signed-off-by: Guenter Roeck diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates index 2a1bf69..8c10a91 100644 --- a/Documentation/hwmon/ftsteutates +++ b/Documentation/hwmon/ftsteutates @@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and implemented in this driver. Specification of the chip can be found here: -ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf -ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf +ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf +ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf -- cgit v0.10.2 From 9adeb8e72dbfe976709df01e259ed556ee60e779 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 9 Aug 2016 18:25:26 -0700 Subject: arm64: Handle el1 synchronous instruction aborts cleanly Executing from a non-executable area gives an ugly message: lkdtm: Performing direct entry EXEC_RODATA lkdtm: attempting ok execution at ffff0000084c0e08 lkdtm: attempting bad execution at ffff000008880700 Bad mode in Synchronous Abort handler detected on CPU2, code 0x8400000e -- IABT (current EL) CPU: 2 PID: 998 Comm: sh Not tainted 4.7.0-rc2+ #13 Hardware name: linux,dummy-virt (DT) task: ffff800077e35780 ti: ffff800077970000 task.ti: ffff800077970000 PC is at lkdtm_rodata_do_nothing+0x0/0x8 LR is at execute_location+0x74/0x88 The 'IABT (current EL)' indicates the error but it's a bit cryptic without knowledge of the ARM ARM. There is also no indication of the specific address which triggered the fault. The increase in kernel page permissions makes hitting this case more likely as well. 
Handling the case in the vectors gives a much more familiar looking error message: lkdtm: Performing direct entry EXEC_RODATA lkdtm: attempting ok execution at ffff0000084c0840 lkdtm: attempting bad execution at ffff000008880680 Unable to handle kernel paging request at virtual address ffff000008880680 pgd = ffff8000089b2000 [ffff000008880680] *pgd=00000000489b4003, *pud=0000000048904003, *pmd=0000000000000000 Internal error: Oops: 8400000e [#1] PREEMPT SMP Modules linked in: CPU: 1 PID: 997 Comm: sh Not tainted 4.7.0-rc1+ #24 Hardware name: linux,dummy-virt (DT) task: ffff800077f9f080 ti: ffff800008a1c000 task.ti: ffff800008a1c000 PC is at lkdtm_rodata_do_nothing+0x0/0x8 LR is at execute_location+0x74/0x88 Acked-by: Mark Rutland Signed-off-by: Laura Abbott Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 96e4a2b..441420c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -353,6 +353,8 @@ el1_sync: lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 b.eq el1_da + cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1 + b.eq el1_ia cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el1_undef cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception @@ -364,6 +366,11 @@ el1_sync: cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 b.ge el1_dbg b el1_inv + +el1_ia: + /* + * Fall through to the Data abort case + */ el1_da: /* * Data abort handling diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index c8beaa0..05d2bd7 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma, } #endif +static bool is_el1_instruction_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; +} + /* * The kernel tried to access some page that wasn't present. */ @@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, { /* * Are we prepared to handle this kernel fault? + * We are almost certainly not prepared to handle instruction faults. */ - if (fixup_exception(regs)) + if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) return; /* @@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr) unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; - return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM); + return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) || + (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM); } static bool is_el0_instruction_abort(unsigned int esr) @@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, if (regs->orig_addr_limit == KERNEL_DS) die("Accessing user space memory with fs=KERNEL_DS", regs, esr); + if (is_el1_instruction_abort(esr)) + die("Attempting to execute userspace memory", regs, esr); + if (!search_exception_tables(regs->pc)) die("Accessing user space memory outside uaccess.h routines", regs, esr); } -- cgit v0.10.2 From e1717e0485af4f47fc4da1e979ac817f9ad61b0f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 20 Jul 2016 12:00:06 +0300 Subject: perf intel-pt: Fix ip compression The June 2015 Intel SDM introduced IP Compression types 4 and 6. Refer to section 36.4.2.2 Target IP (TIP) Packet - IP Compression. Existing Intel PT packet decoder did not support type 4, and got type 6 wrong. 
Because type 3 and type 4 have the same number of bytes, the packet 'count' has been changed from being the number of ip bytes to being the type code. That allows the Intel PT decoder to correctly decide whether to sign-extend or use the last ip. However that also meant the code had to be adjusted in a number of places. Currently hardware is not using the new compression types, so this fix has no effect on existing hardware. Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lkml.kernel.org/r/1469005206-3049-1-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 9c8f15d..8ff6c6a 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -123,8 +123,6 @@ struct intel_pt_decoder { bool have_calc_cyc_to_tsc; int exec_mode; unsigned int insn_bytes; - uint64_t sign_bit; - uint64_t sign_bits; uint64_t period; enum intel_pt_period_type period_type; uint64_t tot_insn_cnt; @@ -191,9 +189,6 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) decoder->data = params->data; decoder->return_compression = params->return_compression; - decoder->sign_bit = (uint64_t)1 << 47; - decoder->sign_bits = ~(((uint64_t)1 << 48) - 1); - decoder->period = params->period; decoder->period_type = params->period_type; @@ -362,21 +357,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen) return 0; } -static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, - const struct intel_pt_pkt *packet, +static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet, uint64_t last_ip) { uint64_t ip; switch (packet->count) { - case 2: + case 1: ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) | packet->payload; break; - case 4: + case 2: ip = (last_ip & (uint64_t)0xffffffff00000000ULL) | packet->payload; break; + case 3: + ip = packet->payload; + /* Sign-extend 6-byte ip */ + if (ip & (uint64_t)0x800000000000ULL) + ip |= (uint64_t)0xffff000000000000ULL; + break; + case 4: + ip = (last_ip & (uint64_t)0xffff000000000000ULL) | + packet->payload; + break; case 6: ip = packet->payload; break; @@ -384,16 +388,12 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, return 0; } - if (ip & decoder->sign_bit) - return ip | decoder->sign_bits; - return ip; } static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder) { - decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet, - decoder->last_ip); + decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip); } static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder) @@ -1657,6 +1657,12 @@ next: } } +static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder) +{ + return decoder->last_ip || decoder->packet.count == 0 || + decoder->packet.count == 3 || decoder->packet.count == 6; +} + /* Walk PSB+ packets to get in sync. 
*/ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) { @@ -1677,8 +1683,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) case INTEL_PT_FUP: decoder->pge = true; - if (decoder->last_ip || decoder->packet.count == 6 || - decoder->packet.count == 0) { + if (intel_pt_have_ip(decoder)) { uint64_t current_ip = decoder->ip; intel_pt_set_ip(decoder); @@ -1767,8 +1772,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) case INTEL_PT_TIP_PGE: case INTEL_PT_TIP: decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; - if (decoder->last_ip || decoder->packet.count == 6 || - decoder->packet.count == 0) + if (intel_pt_have_ip(decoder)) intel_pt_set_ip(decoder); if (decoder->ip) return 0; @@ -1776,9 +1780,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) case INTEL_PT_FUP: if (decoder->overflow) { - if (decoder->last_ip || - decoder->packet.count == 6 || - decoder->packet.count == 0) + if (intel_pt_have_ip(decoder)) intel_pt_set_ip(decoder); if (decoder->ip) return 0; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index b1257c8..4f7b320 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c @@ -292,36 +292,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte, const unsigned char *buf, size_t len, struct intel_pt_pkt *packet) { - switch (byte >> 5) { + int ip_len; + + packet->count = byte >> 5; + + switch (packet->count) { case 0: - packet->count = 0; + ip_len = 0; break; case 1: if (len < 3) return INTEL_PT_NEED_MORE_BYTES; - packet->count = 2; + ip_len = 2; packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1)); break; case 2: if (len < 5) return INTEL_PT_NEED_MORE_BYTES; - packet->count = 4; + ip_len = 4; packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1)); break; case 3: - case 6: + case 4: if (len < 7) return INTEL_PT_NEED_MORE_BYTES; - packet->count = 6; + ip_len = 6; memcpy_le64(&packet->payload, buf + 1, 6); break; + case 6: + if (len < 9) + return INTEL_PT_NEED_MORE_BYTES; + ip_len = 8; + packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1)); + break; default: return INTEL_PT_BAD_PACKET; } packet->type = type; - return packet->count + 1; + return ip_len + 1; } static int intel_pt_get_mode(const unsigned char *buf, size_t len, -- cgit v0.10.2 From 33da54fa86e29b87fe1e83bd0f15b4ef2be53ecb Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 11 Aug 2016 10:50:57 +0200 Subject: perf tools mem: Fix -t store option for record command Michael reported 'perf mem -t store record' being broken. The reason is latest rework of this area: commit acbe613e0c03 ("perf tools: Add monitored events array") We don't mark perf_mem_events store record when -t store option is specified. 
Committer notes: Before: # perf mem -t store record usleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.020 MB perf.data (7 samples) ] # perf evlist cycles:ppp # After: # perf mem -t store record usleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.020 MB perf.data (7 samples) ] # perf evlist cpu/mem-stores/P # Reported-by: Michael Petlan Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: David Ahern Cc: Namhyung Kim Cc: Peter Zijlstra Fixes: acbe613e0c03 ("perf tools: Add monitored events array") Link: http://lkml.kernel.org/r/1470905457-18311-1-git-send-email-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index d608a2c..d1ce29b 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -88,6 +88,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) if (mem->operation & MEM_OPERATION_LOAD) perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true; + if (mem->operation & MEM_OPERATION_STORE) + perf_mem_events[PERF_MEM_EVENTS__STORE].record = true; + if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record) rec_argv[i++] = "-W"; -- cgit v0.10.2 From f046f3df665361d2f89e660f8c79ba164069f02a Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 11 Aug 2016 14:43:59 -0300 Subject: perf ppc64le: Fix build failure when libelf is not present arch__post_process_probe_trace_events() calls get_target_map() to prepare symbol table. get_target_map() is defined inside util/probe-event.c. probe-event.c will only get included in perf binary if CONFIG_LIBELF is set. Hence arch__post_process_probe_trace_events() needs to be defined inside #ifdef HAVE_LIBELF_SUPPORT to solve compilation error. Reported-and-Tested-by: Anton Blanchard Tested-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Alexander Shishkin Cc: Balbir Singh Cc: Naveen N. Rao Cc: Ananth N Mavinakayanahalli Cc: Masami Hiramatsu Cc: Wang Nan Cc: Namhyung Kim Link: http://lkml.kernel.org/r/57ABFF88.8030905@linux.vnet.ibm.com [ Thunderbird MUA mangled it, fix that ] Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c index 8d4dc97..35745a7 100644 --- a/tools/perf/arch/powerpc/util/sym-handling.c +++ b/tools/perf/arch/powerpc/util/sym-handling.c @@ -97,6 +97,7 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev, } } +#ifdef HAVE_LIBELF_SUPPORT void arch__post_process_probe_trace_events(struct perf_probe_event *pev, int ntevs) { @@ -118,5 +119,6 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev, } } } +#endif /* HAVE_LIBELF_SUPPORT */ #endif -- cgit v0.10.2 From 0194e760f7d2f42adb5e1db31b27a4331dd89c2f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 11 Aug 2016 14:11:05 +0100 Subject: arm64: hibernate: avoid potential TLB conflict In create_safe_exec_page we install a set of global mappings in TTBR0, then subsequently invalidate TLBs. While TTBR0 points at the zero page, and the TLBs should be free of stale global entries, we may have stale ASID-tagged entries (e.g. from the EFI runtime services mappings) for the same VAs. Per the ARM ARM these ASID-tagged entries may conflict with newly-allocated global entries, and we must follow a Break-Before-Make approach to avoid issues resulting from this. 
This patch reworks create_safe_exec_page to invalidate TLBs while the zero page is still in place, ensuring that there are no potential conflicts when the new TTBR0 value is installed. As a single CPU is online while this code executes, we do not need to perform broadcast TLB maintenance, and can call local_flush_tlb_all(), which also subsumes some barriers. The remaining assembly is converted to use write_sysreg() and isb(). Other than this, we safely manipulate TTBRs in the hibernate dance. The code we install as part of the new TTBR0 mapping (the hibernated kernel's swsusp_arch_suspend_exit) installs a zero page into TTBR1, invalidates TLBs, then installs its preferred value. Upon being restored to the middle of swsusp_arch_suspend, the new image will call __cpu_suspend_exit, which will call cpu_uninstall_idmap, installing the zero page in TTBR0 and invalidating all TLB entries. Fixes: 82869ac57b5d ("arm64: kernel: Add support for hibernate/suspend-to-disk") Signed-off-by: Mark Rutland Acked-by: James Morse Tested-by: James Morse Cc: Lorenzo Pieralisi Cc: Will Deacon Cc: # 4.7+ Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 21ab5df..b2e7de8 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -35,6 +35,7 @@ #include #include #include +#include #include /* @@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length, set_pte(pte, __pte(virt_to_phys((void *)dst) | pgprot_val(PAGE_KERNEL_EXEC))); - /* Load our new page tables */ - asm volatile("msr ttbr0_el1, %0;" - "isb;" - "tlbi vmalle1is;" - "dsb ish;" - "isb" : : "r"(virt_to_phys(pgd))); + /* + * Load our new page tables. A strict BBM approach requires that we + * ensure that TLBs are free of any entries that may overlap with the + * global mappings we are about to install. + * + * For a real hibernate/resume cycle TTBR0 currently points to a zero + * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI + * runtime services), while for a userspace-driven test_resume cycle it + * points to userspace page tables (and we must point it at a zero page + * ourselves). Elsewhere we only (un)install the idmap with preemption + * disabled, so T0SZ should be as required regardless. + */ + cpu_set_reserved_ttbr0(); + local_flush_tlb_all(); + write_sysreg(virt_to_phys(pgd), ttbr0_el1); + isb(); *phys_dst_addr = virt_to_phys((void *)dst); -- cgit v0.10.2 From dfbca61af0b654990b9af8297ac574a9986d8275 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 11 Aug 2016 14:11:06 +0100 Subject: arm64: hibernate: handle allocation failures In create_safe_exec_page(), we create a copy of the hibernate exit text, along with some page tables to map this via TTBR0. We then install the new tables in TTBR0. In swsusp_arch_resume() we call create_safe_exec_page() before trying a number of operations which may fail (e.g. copying the linear map page tables). If these fail, we bail out of swsusp_arch_resume() and return an error code, but leave TTBR0 as-is. Subsequently, the core hibernate code will call free_basic_memory_bitmaps(), which will free all of the memory allocations we made, including the page tables installed in TTBR0. Thus, we may have TTBR0 pointing at dangling freed memory for some period of time. If the hibernate attempt was triggered by a user requesting a hibernate test via the reboot syscall, we may return to userspace with the clobbered TTBR0 value. 
Avoid these issues by reorganising swsusp_arch_resume() such that we have no failure paths after create_safe_exec_page(). We also add a check that the zero page allocation succeeded, matching what we have for other allocations. Fixes: 82869ac57b5d ("arm64: kernel: Add support for hibernate/suspend-to-disk") Signed-off-by: Mark Rutland Acked-by: James Morse Cc: Lorenzo Pieralisi Cc: Will Deacon Cc: # 4.7+ Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index b2e7de8..65d81f9 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -405,6 +405,38 @@ int swsusp_arch_resume(void) void *, phys_addr_t, phys_addr_t); /* + * Restoring the memory image will overwrite the ttbr1 page tables. + * Create a second copy of just the linear map, and use this when + * restoring. + */ + tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); + if (!tmp_pg_dir) { + pr_err("Failed to allocate memory for temporary page tables."); + rc = -ENOMEM; + goto out; + } + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); + if (rc) + goto out; + + /* + * Since we only copied the linear map, we need to find restore_pblist's + * linear map address. + */ + lm_restore_pblist = LMADDR(restore_pblist); + + /* + * We need a zero page that is zero before & after resume in order to + * to break before make on the ttbr1 page tables. + */ + zero_page = (void *)get_safe_page(GFP_ATOMIC); + if (!zero_page) { + pr_err("Failed to allocate zero page."); + rc = -ENOMEM; + goto out; + } + + /* * Locate the exit code in the bottom-but-one page, so that *NULL * still has disastrous affects. */ @@ -430,27 +462,6 @@ int swsusp_arch_resume(void) __flush_dcache_area(hibernate_exit, exit_size); /* - * Restoring the memory image will overwrite the ttbr1 page tables. - * Create a second copy of just the linear map, and use this when - * restoring. - */ - tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); - if (!tmp_pg_dir) { - pr_err("Failed to allocate memory for temporary page tables."); - rc = -ENOMEM; - goto out; - } - rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); - if (rc) - goto out; - - /* - * Since we only copied the linear map, we need to find restore_pblist's - * linear map address. - */ - lm_restore_pblist = LMADDR(restore_pblist); - - /* * KASLR will cause the el2 vectors to be in a different location in * the resumed kernel. Load hibernate's temporary copy into el2. * @@ -464,12 +475,6 @@ int swsusp_arch_resume(void) __hyp_set_vectors(el2_vectors); } - /* - * We need a zero page that is zero before & after resume in order to - * to break before make on the ttbr1 page tables. - */ - zero_page = (void *)get_safe_page(GFP_ATOMIC); - hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, resume_hdr.reenter_kernel, lm_restore_pblist, resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); -- cgit v0.10.2 From 2323439fd0c844b0e84d16968699c419809d480e Mon Sep 17 00:00:00 2001 From: Riku Voipio Date: Wed, 15 Jun 2016 11:27:59 +0300 Subject: arm64: defconfig: add options for virtualization and containers Enable options commonly needed by popular virtualization and container applications. Use modules when possible to avoid too much overhead for users not interested. - add namespace and cgroup options needed - add seccomp - optional, but enhances Qemu etc - bridge, nat, veth, macvtap and multicast for routing guests and containers - btfrs and overlayfs modules for container COW backends - while near it, make fuse a module instead of built-in. 
Generated with make saveconfig and dropping unrelated spurious change hunks while commiting. bloat-o-meter old-vmlinux vmlinux: add/remove: 905/390 grow/shrink: 767/229 up/down: 183513/-94861 (88652) .... Total: Before=10515408, After=10604060, chg +0.84% Signed-off-by: Riku Voipio Signed-off-by: Catalin Marinas diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 0555b7c..2016444 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -15,10 +15,14 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_HUGETLB=y -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_NET_NS is not set +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y @@ -71,6 +75,7 @@ CONFIG_PREEMPT=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA=y +CONFIG_SECCOMP=y CONFIG_XEN=y CONFIG_KEXEC=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set @@ -84,10 +89,37 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y +CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set +CONFIG_IPV6=m +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_MANGLE=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y CONFIG_BPF_JIT=y CONFIG_CFG80211=m CONFIG_MAC80211=m @@ -103,6 +135,7 @@ CONFIG_MTD=y CONFIG_MTD_M25P80=y CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m CONFIG_VIRTIO_BLK=y CONFIG_SRAM=y # CONFIG_SCSI_PROC_FS is not set @@ -120,7 +153,10 @@ CONFIG_SATA_SIL24=y CONFIG_PATA_PLATFORM=y CONFIG_PATA_OF_PLATFORM=y CONFIG_NETDEVICES=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m CONFIG_TUN=y +CONFIG_VETH=m CONFIG_VIRTIO_NET=y CONFIG_AMD_XGBE=y CONFIG_NET_XGENE=y @@ -350,12 +386,16 @@ CONFIG_EXYNOS_ADC=y CONFIG_PWM_SAMSUNG=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA=y CONFIG_AUTOFS4_FS=y -CONFIG_FUSE_FS=y -CONFIG_CUSE=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y -- cgit v0.10.2 From 53fb45d3df6fb64eff6c314b3fa2e279a2496e5b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 8 Jun 2016 21:26:23 +0900 Subject: arm64: defconfig: enable CONFIG_LOCALVERSION_AUTO When CONFIG_LOCALVERSION_AUTO is disabled, the version string is just a tag name (or with a '+' appended if HEAD is not a tagged commit). During the development (and especially when git-bisecting), longer version string would be helpful to identify the commit we are running. This is a default y option, so drop the unset to enable it. 
Signed-off-by: Masahiro Yamada Signed-off-by: Catalin Marinas diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 2016444..eadf485 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1,4 +1,3 @@ -# CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y -- cgit v0.10.2 From fc520f8b4fa35ceb36f0ad031c1a297968788236 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Fri, 12 Aug 2016 13:49:34 +0800 Subject: of/platform: disable the of_platform_default_populate_init() for all the ppc boards With the commit 44a7185c2ae6 ("of/platform: Add common method to populate default bus"), a default function is introduced to populate the default bus and this function is invoked at the arch_initcall_sync level. But a lot of ppc boards use machine_device_initcall() to populate the default bus. This means that the default populate function has higher priority and would override the arch specific population of the bus. The side effect is that some arch-specific buses are not probed, which then causes various malfunctions because some devices are missing. Since it is very likely that bugs would be introduced if we simply changed the initcall level for all of these boards (about 30+), just disable this default function for all the ppc boards. Signed-off-by: Kevin Hao Signed-off-by: Rob Herring diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 8aa1976..f39ccd5 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root, } EXPORT_SYMBOL_GPL(of_platform_default_populate); +#ifndef CONFIG_PPC static int __init of_platform_default_populate_init(void) { struct device_node *node; @@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void) return 0; } arch_initcall_sync(of_platform_default_populate_init); +#endif static int of_platform_device_destroy(struct device *dev, void *data) { -- cgit v0.10.2 From dd257933fa4b9fea66a1195f8a15111029810abc Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 11 Aug 2016 10:37:39 -0400 Subject: nfsd: don't return an unhashed lock stateid after taking mutex nfsd4_lock will take the st_mutex before working with the stateid it gets, but between the time when we drop the cl_lock and take the mutex, the stateid could become unhashed (a'la FREE_STATEID). If that happens the lock stateid returned to the client will be forgotten. Fix this by first moving the st_mutex acquisition into lookup_or_create_lock_state. Then, have it check to see if the lock stateid is still hashed after taking the mutex. If it's not, then put the stateid and try the find/create again. Signed-off-by: Jeff Layton Tested-by: Alexey Kodanev Cc: stable@vger.kernel.org # feb9dad5 nfsd: Always lock state exclusively. Cc: stable@vger.kernel.org Signed-off-by: J.
Bruce Fields diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 0edc182..a204d7e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -5523,7 +5523,7 @@ static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, - struct nfs4_ol_stateid **lst, bool *new) + struct nfs4_ol_stateid **plst, bool *new) { __be32 status; struct nfs4_file *fi = ost->st_stid.sc_file; @@ -5531,7 +5531,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_client *cl = oo->oo_owner.so_client; struct inode *inode = d_inode(cstate->current_fh.fh_dentry); struct nfs4_lockowner *lo; + struct nfs4_ol_stateid *lst; unsigned int strhashval; + bool hashed; lo = find_lockowner_str(cl, &lock->lk_new_owner); if (!lo) { @@ -5547,12 +5549,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, goto out; } - *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); - if (*lst == NULL) { +retry: + lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); + if (lst == NULL) { status = nfserr_jukebox; goto out; } + + mutex_lock(&lst->st_mutex); + + /* See if it's still hashed to avoid race with FREE_STATEID */ + spin_lock(&cl->cl_lock); + hashed = !list_empty(&lst->st_perfile); + spin_unlock(&cl->cl_lock); + + if (!hashed) { + mutex_unlock(&lst->st_mutex); + nfs4_put_stid(&lst->st_stid); + goto retry; + } status = nfs_ok; + *plst = lst; out: nfs4_put_stateowner(&lo->lo_owner); return status; @@ -5619,8 +5636,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; status = lookup_or_create_lock_state(cstate, open_stp, lock, &lock_stp, &new); - if (status == nfs_ok) - mutex_lock(&lock_stp->st_mutex); } else { status = nfs4_preprocess_seqid_op(cstate, lock->lk_old_lock_seqid, -- cgit v0.10.2 From a7a0729c4532c94f4e8efb4faaf6ef00e5fe19ba Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Fri, 12 Aug 2016 14:11:12 -0600 Subject: docs: Set the Sphinx default highlight language to "guess" This should eliminate a whole class of markup warnings, at the cost of occasionally amusing markup choices; we'll have to see if it works out. Suggested-by: Markus Heiser Signed-off-by: Jonathan Corbet diff --git a/Documentation/conf.py b/Documentation/conf.py index 96b7aa6..106ae9c 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -131,7 +131,7 @@ pygments_style = 'sphinx' todo_include_todos = False primary_domain = 'C' -highlight_language = 'C' +highlight_language = 'guess' # -- Options for HTML output ---------------------------------------------- -- cgit v0.10.2 From ce7c6c9e1d997a2670aead3a7b87f4df32c11118 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Sat, 30 Jul 2016 10:06:26 -0600 Subject: mpt3sas: Fix resume on WarpDrive flash cards mpt3sas crashes on resume after suspend with WarpDrive flash cards. The reply_post_host_index array is not set back up after the resume, and we deference a stale pointer in _base_interrupt(). [ 47.309711] BUG: unable to handle kernel paging request at ffffc90001f8006c [ 47.318289] IP: [] _base_interrupt+0x49f/0xa30 [mpt3sas] [ 47.326749] PGD 41ccaa067 PUD 41ccab067 PMD 3466c067 PTE 0 [ 47.333848] Oops: 0002 [#1] SMP ... [ 47.452708] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.7.0 #6 [ 47.460506] Hardware name: Dell Inc. 
OptiPlex 990/06D7TR, BIOS A18 09/24/2013 [ 47.469629] task: ffffffff81c0d500 ti: ffffffff81c00000 task.ti: ffffffff81c00000 [ 47.479112] RIP: 0010:[] [] _base_interrupt+0x49f/0xa30 [mpt3sas] [ 47.490466] RSP: 0018:ffff88041d203e30 EFLAGS: 00010002 [ 47.497801] RAX: 0000000000000001 RBX: ffff880033f4c000 RCX: 0000000000000001 [ 47.506973] RDX: ffffc90001f8006c RSI: 0000000000000082 RDI: 0000000000000082 [ 47.516141] RBP: ffff88041d203eb0 R08: ffff8804118e2820 R09: 0000000000000001 [ 47.525300] R10: 0000000000000001 R11: 00000000100c0000 R12: 0000000000000000 [ 47.534457] R13: ffff880412c487e0 R14: ffff88041a8987d8 R15: 0000000000000001 [ 47.543632] FS: 0000000000000000(0000) GS:ffff88041d200000(0000) knlGS:0000000000000000 [ 47.553796] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 47.561632] CR2: ffffc90001f8006c CR3: 0000000001c06000 CR4: 00000000000406f0 [ 47.570883] Stack: [ 47.575015] 000000001d211228 ffff88041d2100c0 ffff8800c47d8130 0000000000000100 [ 47.584625] ffff8804100c0000 100c000000000000 ffff88041a8992a0 ffff88041a8987f8 [ 47.594230] ffff88041d203e00 ffffffff81111e55 000000000000038c ffff880414ad4280 [ 47.603862] Call Trace: [ 47.608474] [ 47.610413] [] ? call_timer_fn+0x35/0x120 [ 47.620539] [] handle_irq_event_percpu+0x7f/0x1c0 [ 47.629061] [] handle_irq_event+0x2c/0x50 [ 47.636859] [] handle_edge_irq+0x6f/0x130 [ 47.644654] [] handle_irq+0x73/0x120 [ 47.652011] [] ? atomic_notifier_call_chain+0x1a/0x20 [ 47.660854] [] do_IRQ+0x4b/0xd0 [ 47.667777] [] common_interrupt+0x8c/0x8c [ 47.675635] Move the reply_post_host_index array setup into mpt3sas_base_map_resources(), which is also in the resume path. Cc: stable@vger.kernel.org Signed-off-by: Greg Edwards Acked-by: Chaitra P B Signed-off-by: Martin K. Petersen diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 751f13e..750f82c 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) } else ioc->msix96_vector = 0; + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = (resource_size_t __iomem *) + &ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = + (resource_size_t __iomem *) + ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) pr_info(MPT3SAS_FMT "%s: IRQ %d\n", reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : @@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_free_resources; - if (ioc->is_warpdrive) { - ioc->reply_post_host_index[0] = (resource_size_t __iomem *) - &ioc->chip->ReplyPostHostIndex; - - for (i = 1; i < ioc->cpu_msix_table_sz; i++) - ioc->reply_post_host_index[i] = - (resource_size_t __iomem *) - ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) - * 4))); - } - pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_get_ioc_facts(ioc, CAN_SLEEP); if (r) -- cgit v0.10.2 From 5381cfb6f0422da24cfa9da35b0433c0415830e0 Mon Sep 17 00:00:00 2001 From: Sven Van Asbroeck Date: Fri, 12 Aug 2016 09:10:27 -0400 Subject: power: supply: max17042_battery: fix model download bug. The device's model download function returns the model data as an array of u32s, which is later compared to the reference model data. However, since the latter is an array of u16s, the comparison does not happen correctly, and model verification fails. 
This in turn breaks the POR initialization sequence. Fixes: 39e7213edc4f3 ("max17042_battery: Support regmap to access device's registers") Reported-by: Dan Carpenter Signed-off-by: Sven Van Asbroeck Reviewed-by: Krzysztof Kozlowski Signed-off-by: Sebastian Reichel diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 9c65f13..da7a75f 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, } static inline void max17042_read_model_data(struct max17042_chip *chip, - u8 addr, u32 *data, int size) + u8 addr, u16 *data, int size) { struct regmap *map = chip->regmap; int i; + u32 tmp; - for (i = 0; i < size; i++) - regmap_read(map, addr + i, &data[i]); + for (i = 0; i < size; i++) { + regmap_read(map, addr + i, &tmp); + data[i] = (u16)tmp; + } } static inline int max17042_model_data_compare(struct max17042_chip *chip, @@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip) { int ret; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); if (!temp_data) @@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip) ret = max17042_model_data_compare( chip, chip->pdata->config_data->cell_char_tbl, - (u16 *)temp_data, + temp_data, table_size); max10742_lock_model(chip); @@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) { int i; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; int ret = 0; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); -- cgit v0.10.2 From 7a4947cf6f26b7d0a5db260d212f6df41a563d23 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Fri, 12 Aug 2016 18:01:50 +0800 Subject: power: reset: reboot-mode: fix build error of missing ioremap/iounmap on UM commit 4fcd504edbf7 ("power: reset: add reboot mode driver") uses api from syscon, and syscon uses ioremap/iounmap which depends on HAS_IOMEM, so let's depend on MFD_SYSCON instead of selecting it directly to avoid the um-allyesconfig like build error on archs that without iomem: drivers/mfd/syscon.c: In function 'of_syscon_register': drivers/mfd/syscon.c:67:9: error: implicit declaration of function 'ioremap' [-Werror=implicit-function-declaration] base = ioremap(res.start, resource_size(&res)); ^ drivers/mfd/syscon.c:67:7: warning: assignment makes pointer from integer without a cast [-Wint-conversion] base = ioremap(res.start, resource_size(&res)); ^ drivers/mfd/syscon.c:109:2: error: implicit declaration of function 'iounmap' [-Werror=implicit-function-declaration] iounmap(base); ^ Reported-by: Kbuild test robot Fixes: 4fcd504edbf7("power: reset: add reboot mode driver") Signed-off-by: Andy Yan Reviewed-by: Krzysztof Kozlowski Signed-off-by: Sebastian Reichel diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 3bfac53..c74c3f6 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -200,8 +200,8 @@ config REBOOT_MODE config SYSCON_REBOOT_MODE tristate "Generic SYSCON regmap reboot mode driver" depends on OF + depends on MFD_SYSCON select REBOOT_MODE - select MFD_SYSCON help Say y here will enable reboot mode driver. 
This will get reboot mode arguments and store it in SYSCON mapped -- cgit v0.10.2 From bae170efd6c42bf116f513a1dd07639d68fa71b9 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Fri, 12 Aug 2016 20:49:18 +0530 Subject: power: reset: hisi-reboot: Unmap region obtained by of_iomap Free memory mapping, if probe is not successful. Fixes: 4a9b37371822 ("power: reset: move hisilicon reboot code") Signed-off-by: Arvind Yadav Signed-off-by: Sebastian Reichel diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c index 9ab7f56..f69387e 100644 --- a/drivers/power/reset/hisi-reboot.c +++ b/drivers/power/reset/hisi-reboot.c @@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev) if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { pr_err("failed to find reboot-offset property\n"); + iounmap(base); return -EINVAL; } err = register_restart_handler(&hisi_restart_nb); - if (err) + if (err) { dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", err); + iounmap(base); + } return err; } -- cgit v0.10.2 From 62822e2ec4ad091ba31f823f577ef80db52e3c2c Mon Sep 17 00:00:00 2001 From: Thomas Garnier Date: Thu, 11 Aug 2016 14:49:29 -0700 Subject: PM / hibernate: Restore processor state before using per-CPU variables Restore the processor state before calling any other functions to ensure per-CPU variables can be used with KASLR memory randomization. Tracing functions use per-CPU variables (GS based on x86) and one was called just before restoring the processor state fully. It resulted in a double fault when both the tracing & the exception handler functions tried to use a per-CPU variable. Fixes: bb3632c6101b (PM / sleep: trace events for suspend/resume) Reported-and-tested-by: Borislav Petkov Reported-by: Jiri Kosina Tested-by: Rafael J. Wysocki Tested-by: Jiri Kosina Signed-off-by: Thomas Garnier Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 0ee1df0..61761aa 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -300,12 +300,12 @@ static int create_image(int platform_mode) save_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true); error = swsusp_arch_suspend(); + /* Restore control flow magically appears here */ + restore_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); if (error) printk(KERN_ERR "PM: Error %d creating hibernation image\n", error); - /* Restore control flow magically appears here */ - restore_processor_state(); if (!in_suspend) events_check_enabled = false; -- cgit v0.10.2 From c2d5be14cb39da34b076b461d8aa2344611364c6 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Fri, 12 Aug 2016 15:12:36 -0600 Subject: docs: kernel-documentation: remove some highlight directives With the conf.py change, we don't need them to avoid warnings anymore. Signed-off-by: Jonathan Corbet diff --git a/Documentation/kernel-documentation.rst b/Documentation/kernel-documentation.rst index c4eb504..391decc 100644 --- a/Documentation/kernel-documentation.rst +++ b/Documentation/kernel-documentation.rst @@ -366,8 +366,6 @@ Domain`_ references. Cross-referencing from reStructuredText ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. highlight:: none - To cross-reference the functions and types defined in the kernel-doc comments from reStructuredText documents, please use the `Sphinx C Domain`_ references. 
For example:: @@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation. Function documentation ---------------------- -.. highlight:: c - The general format of a function and function-like macro kernel-doc comment is:: /** @@ -572,8 +568,6 @@ DocBook XML [DEPRECATED] Converting DocBook to Sphinx ---------------------------- -.. highlight:: none - Over time, we expect all of the documents under ``Documentation/DocBook`` to be converted to Sphinx and reStructuredText. For most DocBook XML documents, a good enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script, -- cgit v0.10.2 From e120dcb6b2a11840b08f0fc0d2e90256b7b6e842 Mon Sep 17 00:00:00 2001 From: Calvin Owens Date: Fri, 13 May 2016 13:28:21 -0700 Subject: ses: Fix racy cleanup of /sys in remove_dev() Currently we free the resources backing the enclosure device before we call device_unregister(). This is racy: during rmmod of low-level SCSI drivers that hook into enclosure, we end up with a small window of time during which writing to /sys can OOPS. Example trace with mpt3sas: general protection fault: 0000 [#1] SMP KASAN Modules linked in: mpt3sas(-) <...> RIP: [] ses_get_page2_descriptor.isra.6+0x38/0x220 [ses] Call Trace: [] ses_set_fault+0xf4/0x400 [ses] [] set_component_fault+0xa9/0xf0 [enclosure] [] dev_attr_store+0x3c/0x70 [] sysfs_kf_write+0x115/0x180 [] kernfs_fop_write+0x275/0x3a0 [] __vfs_write+0xe0/0x3e0 [] vfs_write+0x13f/0x4a0 [] SyS_write+0x111/0x230 [] entry_SYSCALL_64_fastpath+0x13/0x94 Fortunately the solution is extremely simple: call device_unregister() before we free the resources, and the race no longer exists. The driver core holds a reference over ->remove_dev(), so AFAICT this is safe. Signed-off-by: Calvin Owens Reviewed-by: James Bottomley Signed-off-by: Martin K. Petersen diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 53ef1cb..0e8601a 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) if (!edev) return; + enclosure_unregister(edev); + ses_dev = edev->scratch; edev->scratch = NULL; @@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(edev->component[0].scratch); put_device(&edev->edev); - enclosure_unregister(edev); } static void ses_intf_remove(struct device *cdev, -- cgit v0.10.2 From 601bbae0bc10d4306857b93d84240b039b3d9a6c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Aug 2016 23:54:08 +0200 Subject: dsa: mv88e6xxx: hide unused functions When CONFIG_NET_DSA_HWMON is disabled, we get warnings about two unused functions whose only callers are all inside of an #ifdef: drivers/net/dsa/mv88e6xxx.c:3257:12: 'mv88e6xxx_mdio_page_write' defined but not used [-Werror=unused-function] drivers/net/dsa/mv88e6xxx.c:3244:12: 'mv88e6xxx_mdio_page_read' defined but not used [-Werror=unused-function] This adds another ifdef around the function definitions. The warnings appeared after the functions were marked 'static', but the problem was already there before that. Signed-off-by: Arnd Bergmann Fixes: 57d3231057e9 ("net: dsa: mv88e6xxx: fix style issues") Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d36aedd..d1d9d3c 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) return err; } +#ifdef CONFIG_NET_DSA_HWMON static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, int reg) { @@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, return ret; } +#endif static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) { -- cgit v0.10.2 From 947d2c2cd3340d11a1737d3f4781170cc3a4b74f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 13 Aug 2016 02:54:04 +0200 Subject: PM / sleep: Update some system sleep documentation Update some documentation related to system sleep to document new features and remove outdated information from it. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Reviewed-by: Chen Yu diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt index b96098c..708f87f 100644 --- a/Documentation/power/basic-pm-debugging.txt +++ b/Documentation/power/basic-pm-debugging.txt @@ -164,7 +164,32 @@ load n/2 modules more and try again. Again, if you find the offending module(s), it(they) must be unloaded every time before hibernation, and please report the problem with it(them). -c) Advanced debugging +c) Using the "test_resume" hibernation option + +/sys/power/disk generally tells the kernel what to do after creating a +hibernation image. One of the available options is "test_resume" which +causes the just created image to be used for immediate restoration. Namely, +after doing: + +# echo test_resume > /sys/power/disk +# echo disk > /sys/power/state + +a hibernation image will be created and a resume from it will be triggered +immediately without involving the platform firmware in any way. + +That test can be used to check if failures to resume from hibernation are +related to bad interactions with the platform firmware. That is, if the above +works every time, but resume from actual hibernation does not work or is +unreliable, the platform firmware may be responsible for the failures. + +On architectures and platforms that support using different kernels to restore +hibernation images (that is, the kernel used to read the image from storage and +load it into memory is different from the one included in the image) or support +kernel address space randomization, it also can be used to check if failures +to resume may be related to the differences between the restore and image +kernels. + +d) Advanced debugging In case that hibernation does not work on your system even in the minimal configuration and compiling more drivers as modules is not practical or some diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index f1f0f59a..974916f 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt @@ -1,75 +1,76 @@ -Power Management Interface - - -The power management subsystem provides a unified sysfs interface to -userspace, regardless of what architecture or platform one is -running. The interface exists in /sys/power/ directory (assuming sysfs -is mounted at /sys). - -/sys/power/state controls system power state. 
Reading from this file -returns what states are supported, which is hard-coded to 'freeze', -'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' -(Suspend-to-Disk). - -Writing to this file one of those strings causes the system to -transition into that state. Please see the file -Documentation/power/states.txt for a description of each of those -states. - - -/sys/power/disk controls the operating mode of the suspend-to-disk -mechanism. Suspend-to-disk can be handled in several ways. We have a -few options for putting the system to sleep - using the platform driver -(e.g. ACPI or other suspend_ops), powering off the system or rebooting the -system (for testing). - -Additionally, /sys/power/disk can be used to turn on one of the two testing -modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the -suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to -/sys/power/state will cause the kernel to disable nonboot CPUs and freeze -tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is -in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel -to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait -for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then, -we are able to look in the log messages and work out, for example, which code -is being slow and which device drivers are misbehaving. - -Reading from this file will display all supported modes and the currently -selected one in brackets, for example - - [shutdown] reboot test testproc - -Writing to this file will accept one of - - 'platform' (only if the platform supports it) - 'shutdown' - 'reboot' - 'testproc' - 'test' - -/sys/power/image_size controls the size of the image created by -the suspend-to-disk mechanism. It can be written a string -representing a non-negative integer that will be used as an upper -limit of the image size, in bytes. The suspend-to-disk mechanism will -do its best to ensure the image size will not exceed that number. However, -if this turns out to be impossible, it will try to suspend anyway using the -smallest image possible. In particular, if "0" is written to this file, the -suspend image will be as small as possible. - -Reading from this file will display the current image size limit, which -is set to 2/5 of available RAM by default. - -/sys/power/pm_trace controls the code which saves the last PM event point in -the RTC across reboots, so that you can debug a machine that just hangs -during suspend (or more commonly, during resume). Namely, the RTC is only -used to save the last PM event point if this file contains '1'. Initially it -contains '0' which may be changed to '1' by writing a string representing a -nonzero integer into it. - -To use this debugging feature you should attempt to suspend the machine, then -reboot it and run - - dmesg -s 1000000 | grep 'hash matches' - -CAUTION: Using it will cause your machine's real-time (CMOS) clock to be -set to a random invalid time after a resume. +Power Management Interface for System Sleep + +Copyright (c) 2016 Intel Corp., Rafael J. Wysocki + +The power management subsystem provides userspace with a unified sysfs interface +for system sleep regardless of the underlying system architecture or platform. +The interface is located in the /sys/power/ directory (assuming that sysfs is +mounted at /sys). + +/sys/power/state is the system sleep state control file. 
+ +Reading from it returns a list of supported sleep states, encoded as: + +'freeze' (Suspend-to-Idle) +'standby' (Power-On Suspend) +'mem' (Suspend-to-RAM) +'disk' (Suspend-to-Disk) + +Suspend-to-Idle is always supported. Suspend-to-Disk is always supported +too as long the kernel has been configured to support hibernation at all +(ie. CONFIG_HIBERNATION is set in the kernel configuration file). Support +for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the +platform. + +If one of the strings listed in /sys/power/state is written to it, the system +will attempt to transition into the corresponding sleep state. Refer to +Documentation/power/states.txt for a description of each of those states. + +/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk). +Specifically, it tells the kernel what to do after creating a hibernation image. + +Reading from it returns a list of supported options encoded as: + +'platform' (put the system into sleep using a platform-provided method) +'shutdown' (shut the system down) +'reboot' (reboot the system) +'suspend' (trigger a Suspend-to-RAM transition) +'test_resume' (resume-after-hibernation test mode) + +The currently selected option is printed in square brackets. + +The 'platform' option is only available if the platform provides a special +mechanism to put the system to sleep after creating a hibernation image (ACPI +does that, for example). The 'suspend' option is available if Suspend-to-RAM +is supported. Refer to Documentation/power/basic_pm_debugging.txt for the +description of the 'test_resume' option. + +To select an option, write the string representing it to /sys/power/disk. + +/sys/power/image_size controls the size of hibernation images. + +It can be written a string representing a non-negative integer that will be +used as a best-effort upper limit of the image size, in bytes. The hibernation +core will do its best to ensure that the image size will not exceed that number. +However, if that turns out to be impossible to achieve, a hibernation image will +still be created and its size will be as small as possible. In particular, +writing '0' to this file will enforce hibernation images to be as small as +possible. + +Reading from this file returns the current image size limit, which is set to +around 2/5 of available RAM by default. + +/sys/power/pm_trace controls the PM trace mechanism saving the last suspend +or resume event point in the RTC across reboots. + +It helps to debug hard lockups or reboots due to device driver failures that +occur during system suspend or resume (which is more common) more effectively. + +If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume +event point in turn will be stored in the RTC memory (overwriting the actual +RTC information), so it will survive a system crash if one occurs right after +storing it and it can be used later to identify the driver that caused the crash +to happen (see Documentation/power/s2ram.txt for more information). + +Initially it contains '0' which may be changed to '1' by writing a string +representing a nonzero integer into it. -- cgit v0.10.2 From 747ea55e4f78fd980350c39570a986b8c1c3e4aa Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 12 Aug 2016 22:17:17 +0200 Subject: bpf: fix bpf_skb_in_cgroup helper naming While hashing out BPF's current_task_under_cgroup helper bits, it came to discussion that the skb_in_cgroup helper name was suboptimally chosen. 
Tejun says: So, I think in_cgroup should mean that the object is in that particular cgroup while under_cgroup in the subhierarchy of that cgroup. Let's rename the other subhierarchy test to under too. I think that'd be a lot less confusing going forward. [...] It's more intuitive and gives us the room to implement the real "in" test if ever necessary in the future. Since this touches uapi bits, we need to change this as long as v4.8 is not yet officially released. Thus, change the helper enum and rename related bits. Fixes: 4a482f34afcc ("cgroup: bpf: Add bpf_skb_in_cgroup_proto") Reference: http://patchwork.ozlabs.org/patch/658500/ Suggested-by: Sargun Dhillon Suggested-by: Tejun Heo Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index da218fe..9e5fc16 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -339,7 +339,7 @@ enum bpf_func_id { BPF_FUNC_skb_change_type, /** - * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb + * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb * @skb: pointer to skb * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type * @index: index of the cgroup in the bpf_map @@ -348,7 +348,7 @@ enum bpf_func_id { * == 1 skb succeeded the cgroup2 descendant test * < 0 error */ - BPF_FUNC_skb_in_cgroup, + BPF_FUNC_skb_under_cgroup, /** * bpf_get_hash_recalc(skb) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7094c69..daea765 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1053,7 +1053,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: - if (func_id != BPF_FUNC_skb_in_cgroup) + if (func_id != BPF_FUNC_skb_under_cgroup) goto error; break; default: @@ -1075,7 +1075,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; - case BPF_FUNC_skb_in_cgroup: + case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; diff --git a/net/core/filter.c b/net/core/filter.c index b5add4e..bd9bf2e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2317,7 +2317,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) } #ifdef CONFIG_SOCK_CGROUP_DATA -static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long)r1; struct bpf_map *map = (struct bpf_map *)(long)r2; @@ -2340,8 +2340,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); } -static const struct bpf_func_proto bpf_skb_in_cgroup_proto = { - .func = bpf_skb_in_cgroup, +static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { + .func = bpf_skb_under_cgroup, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, @@ -2421,8 +2421,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; #ifdef CONFIG_SOCK_CGROUP_DATA - case BPF_FUNC_skb_in_cgroup: - return &bpf_skb_in_cgroup_proto; + case BPF_FUNC_skb_under_cgroup: + return &bpf_skb_under_cgroup_proto; #endif default: return sk_filter_func_proto(func_id); diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index 217c8d5..7927a09 100644 --- a/samples/bpf/bpf_helpers.h 
+++ b/samples/bpf/bpf_helpers.h @@ -72,8 +72,8 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag (void *) BPF_FUNC_l3_csum_replace; static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = (void *) BPF_FUNC_l4_csum_replace; -static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) = - (void *) BPF_FUNC_skb_in_cgroup; +static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = + (void *) BPF_FUNC_skb_under_cgroup; #if defined(__x86_64__) diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c index 2732c37..10ff734 100644 --- a/samples/bpf/test_cgrp2_tc_kern.c +++ b/samples/bpf/test_cgrp2_tc_kern.c @@ -57,7 +57,7 @@ int handle_egress(struct __sk_buff *skb) bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg), eth->h_proto, ip6h->nexthdr); return TC_ACT_OK; - } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) { + } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) { bpf_trace_printk(pass_msg, sizeof(pass_msg)); return TC_ACT_OK; } else { -- cgit v0.10.2 From 783011b13095430262333fd64e17666954064664 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Mon, 21 Mar 2016 04:20:53 -0700 Subject: unicore32: mm: Add missing parameter to arch_vma_access_permitted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit unicore32 fails to compile with the following errors. mm/memory.c: In function ‘__handle_mm_fault’: mm/memory.c:3381: error: too many arguments to function ‘arch_vma_access_permitted’ mm/gup.c: In function ‘check_vma_flags’: mm/gup.c:456: error: too many arguments to function ‘arch_vma_access_permitted’ mm/gup.c: In function ‘vma_permits_fault’: mm/gup.c:640: error: too many arguments to function ‘arch_vma_access_permitted’ Fixes: d61172b4b695b ("mm/core, x86/mm/pkeys: Differentiate instruction fetches") Cc: Dave Hansen Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Guenter Roeck Acked-by: Guan Xuetao diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index e35632e..62dfc64 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, } static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, - bool write, bool foreign) + bool write, bool execute, bool foreign) { /* by default, allow everything */ return true; -- cgit v0.10.2 From 2b05980d895e51fc46d3d27c8d1bcc696dddca99 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 8 Jun 2016 20:11:58 -0700 Subject: h8300: Add missing include file to asm/io.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit h8300 builds fail with arch/h8300/include/asm/io.h:9:15: error: unknown type name ‘u8’ arch/h8300/include/asm/io.h:15:15: error: unknown type name ‘u16’ arch/h8300/include/asm/io.h:21:15: error: unknown type name ‘u32’ and many related errors. 
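A minimal sketch of the dependency behind these errors, assuming nothing about the real asm/io.h beyond what the error messages show: the kernel's fixed-width types are declared in linux/types.h, and a header that uses them has to include it itself instead of relying on include order.

#include <linux/types.h>	/* u8, u16, u32 */

/* Illustrative header fragment only, not the actual h8300 code. */
static inline u8 example_read8(const volatile u8 *addr)
{
	return *addr;
}
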
Fixes: 23c82d41bdf4 ("kexec-allow-architectures-to-override-boot-mapping-fix") Cc: Andrew Morton Signed-off-by: Guenter Roeck diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h index 2e221c5..f86918a 100644 --- a/arch/h8300/include/asm/io.h +++ b/arch/h8300/include/asm/io.h @@ -3,6 +3,8 @@ #ifdef __KERNEL__ +#include + /* H8/300 internal I/O functions */ #define __raw_readb __raw_readb -- cgit v0.10.2 From b4c0e0c61f81dedc82dda35c287ea149ff98b434 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 11 Aug 2016 18:17:22 +0100 Subject: calipso: fix resource leak on calipso_genopt failure Currently, if calipso_genopt fails then the error exit path does not free the ipv6_opt_hdr new causing a memory leak. Fix this by kfree'ing new on the error exit path. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index c53b92c..37ac9de 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop, memcpy(new, hop, start); ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, secattr); - if (ret_val < 0) + if (ret_val < 0) { + kfree(new); return ERR_PTR(ret_val); + } buf_len = start + ret_val; /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ -- cgit v0.10.2 From d7005652cd31dfc5660e1e32bf7e53538ef14987 Mon Sep 17 00:00:00 2001 From: "sean.wang@mediatek.com" Date: Sat, 13 Aug 2016 19:16:18 +0800 Subject: net: ethernet: mediatek: fixed that initializing u64_stats_sync is missing To fix runtime warning with lockdep is enabled due that u64_stats_sync is not initialized well, so add it. Signed-off-by: Sean Wang Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b57ae3a..fe17f8c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1751,6 +1751,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) goto free_netdev; } spin_lock_init(&mac->hw_stats->stats_lock); + u64_stats_init(&mac->hw_stats->syncp); mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; SET_NETDEV_DEV(eth->netdev[id], eth->dev); -- cgit v0.10.2 From e8c2993a4c9fdb0c9e6fc983edd5b52716ce7442 Mon Sep 17 00:00:00 2001 From: "sean.wang@mediatek.com" Date: Sat, 13 Aug 2016 19:16:19 +0800 Subject: net: ethernet: mediatek: add the missing of_node_put() after node is used done This patch adds the missing of_node_put() after finishing the usage of of_parse_phandle() or of_node_get() used by fixed_phy. Signed-off-by: Sean Wang Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index fe17f8c..0030361 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -269,6 +269,8 @@ static int mtk_phy_connect(struct mtk_mac *mac) ADVERTISED_Autoneg; phy_start_aneg(mac->phy_dev); + of_node_put(np); + return 0; } -- cgit v0.10.2 From 0ed661d5a48fa6df0b50ae64d27fe759a3ce42cf Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 11 Aug 2016 21:38:37 +0200 Subject: bpf: fix write helpers with regards to non-linear parts Fix the bpf_try_make_writable() helper and all call sites we have in BPF, it's currently defect with regards to skbs when the write_len spans into non-linear parts, no matter if cloned or not. There are multiple issues at once. 
First, using skb_store_bits() is not correct since even if we have a cloned skb, page frags can still be shared. To really make them private, we need to pull them in via __pskb_pull_tail() first, which also gets us a private head via pskb_expand_head() implicitly. This is for helpers like bpf_skb_store_bytes(), bpf_l3_csum_replace(), bpf_l4_csum_replace(). Really, the only thing reasonable and working here is to call skb_ensure_writable() before any write operation. Meaning, via pskb_may_pull() it makes sure that parts we want to access are pulled in and if not does so plus unclones the skb implicitly. If our write_len still fits the headlen and we're cloned and our header of the clone is not writable, then we need to make a private copy via pskb_expand_head(). skb_store_bits() is a bit misleading and only safe to store into non-linear data in different contexts such as 357b40a18b04 ("[IPV6]: IPV6_CHECKSUM socket option can corrupt kernel memory"). For above BPF helper functions, it means after fixed bpf_try_make_writable(), we've pulled in enough, so that we operate always based on skb->data. Thus, the call to skb_header_pointer() and skb_store_bits() becomes superfluous. In bpf_skb_store_bytes(), the len check is unnecessary too since it can only pass in maximum of BPF stack size, so adding offset is guaranteed to never overflow. Also bpf_l3/4_csum_replace() helpers must test for proper offset alignment since they use __sum16 pointer for writing resulting csum. The remaining helpers that change skb data not discussed here yet are bpf_skb_vlan_push(), bpf_skb_vlan_pop() and bpf_skb_change_proto(). The vlan helpers internally call either skb_ensure_writable() (pop case) and skb_cow_head() (push case, for head expansion), respectively. Similarly, bpf_skb_proto_xlat() takes care to not mangle page frags. Fixes: 608cd71a9c7c ("tc: bpf: generalize pedit action") Fixes: 91bc4822c3d6 ("tc: bpf: add checksum helpers") Fixes: 3697649ff29e ("bpf: try harder on clones when writing into skb") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller diff --git a/net/core/filter.c b/net/core/filter.c index bd9bf2e..cb06ace 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1355,13 +1355,9 @@ static inline int bpf_try_make_writable(struct sk_buff *skb, { int err; - if (!skb_cloned(skb)) - return 0; - if (skb_clone_writable(skb, write_len)) - return 0; - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (!err) - bpf_compute_data_end(skb); + err = skb_ensure_writable(skb, write_len); + bpf_compute_data_end(skb); + return err; } @@ -1379,42 +1375,25 @@ static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) { - struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); struct sk_buff *skb = (struct sk_buff *) (long) r1; - int offset = (int) r2; + unsigned int offset = (unsigned int) r2; void *from = (void *) (long) r3; unsigned int len = (unsigned int) r4; void *ptr; if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) return -EINVAL; - - /* bpf verifier guarantees that: - * 'from' pointer points to bpf program stack - * 'len' bytes of it were initialized - * 'len' > 0 - * 'skb' is a valid pointer to 'struct sk_buff' - * - * so check for invalid 'offset' and too large 'len' - */ - if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff))) + if (unlikely(offset > 0xffff)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; - ptr = skb_header_pointer(skb, offset, len, sp->buff); - if (unlikely(!ptr)) - return -EFAULT; - + ptr = skb->data + offset; if (flags & BPF_F_RECOMPUTE_CSUM) __skb_postpull_rcsum(skb, ptr, len, offset); memcpy(ptr, from, len); - if (ptr == sp->buff) - /* skb_store_bits cannot return -EFAULT here */ - skb_store_bits(skb, offset, ptr, len); - if (flags & BPF_F_RECOMPUTE_CSUM) __skb_postpush_rcsum(skb, ptr, len, offset); if (flags & BPF_F_INVALIDATE_HASH) @@ -1437,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = { static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1; - int offset = (int) r2; + unsigned int offset = (unsigned int) r2; void *to = (void *)(unsigned long) r3; unsigned int len = (unsigned int) r4; void *ptr; - if (unlikely((u32) offset > 0xffff)) + if (unlikely(offset > 0xffff)) goto err_clear; ptr = skb_header_pointer(skb, offset, len, to); @@ -1470,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = { static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) { struct sk_buff *skb = (struct sk_buff *) (long) r1; - int offset = (int) r2; - __sum16 sum, *ptr; + unsigned int offset = (unsigned int) r2; + __sum16 *ptr; if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) return -EINVAL; - if (unlikely((u32) offset > 0xffff)) - return -EFAULT; - if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) + if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; - - ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); - if (unlikely(!ptr)) + if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) return -EFAULT; + ptr = (__sum16 *)(skb->data + offset); switch (flags & BPF_F_HDR_FIELD_MASK) { case 0: if (unlikely(from != 0)) @@ -1501,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) return -EINVAL; } - if (ptr == &sum) - /* skb_store_bits guaranteed to not return -EFAULT here */ - skb_store_bits(skb, offset, ptr, sizeof(sum)); - return 0; } @@ -1524,20 +1496,18 
@@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) struct sk_buff *skb = (struct sk_buff *) (long) r1; bool is_pseudo = flags & BPF_F_PSEUDO_HDR; bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; - int offset = (int) r2; - __sum16 sum, *ptr; + unsigned int offset = (unsigned int) r2; + __sum16 *ptr; if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) return -EINVAL; - if (unlikely((u32) offset > 0xffff)) + if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; - if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) + if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) return -EFAULT; - ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); - if (unlikely(!ptr)) - return -EFAULT; + ptr = (__sum16 *)(skb->data + offset); if (is_mmzero && !*ptr) return 0; @@ -1560,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (is_mmzero && !*ptr) *ptr = CSUM_MANGLED_0; - if (ptr == &sum) - /* skb_store_bits guaranteed to not return -EFAULT here */ - skb_store_bits(skb, offset, ptr, sizeof(sum)); - return 0; } -- cgit v0.10.2 From c15c0ab12fd62f2b19181d05c62d24bc9fa55a42 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 12 Aug 2016 07:48:21 +0200 Subject: ipv6: suppress sparse warnings in IP6_ECN_set_ce() Pass the correct type __wsum to csum_sub() and csum_add(). This doesn't really change anything since __wsum really *is* __be32, but removes the address space warnings from sparse. Cc: Eric Dumazet Fixes: 34ae6a1aa054 ("ipv6: update skb->csum when CE mark is propagated") Signed-off-by: Johannes Berg Acked-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 0dc0a51..dce2d58 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) to = from | htonl(INET_ECN_CE << 20); *(__be32 *)iph = to; if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_add(csum_sub(skb->csum, from), to); + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), + (__force __wsum)to); return 1; } -- cgit v0.10.2 From 5ba092efc7ddff040777ae7162f1d195f513571b Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 12 Aug 2016 10:29:13 +0200 Subject: net/irda: handle iriap_register_lsap() allocation failure If iriap_register_lsap() fails to allocate memory, self->lsap is set to NULL. However, none of the callers handle the failure and irlmp_connect_request() will happily dereference it: iriap_register_lsap: Unable to allocated LSAP! 
================================================================================ UBSAN: Undefined behaviour in net/irda/irlmp.c:378:2 member access within null pointer of type 'struct lsap_cb' CPU: 1 PID: 15403 Comm: trinity-c0 Not tainted 4.8.0-rc1+ #81 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3-0-ge2fc41e-prebuilt.qemu-project.org 04/01/2014 0000000000000000 ffff88010c7e78a8 ffffffff82344f40 0000000041b58ab3 ffffffff84f98000 ffffffff82344e94 ffff88010c7e78d0 ffff88010c7e7880 ffff88010630ad00 ffffffff84a5fae0 ffffffff84d3f5c0 000000000000017a Call Trace: [] dump_stack+0xac/0xfc [] ubsan_epilogue+0xd/0x8a [] __ubsan_handle_type_mismatch+0x157/0x411 [] irlmp_connect_request+0x7ac/0x970 [] iriap_connect_request+0xa0/0x160 [] state_s_disconnect+0x88/0xd0 [] iriap_do_client_event+0x94/0x120 [] iriap_getvaluebyclass_request+0x3e0/0x6d0 [] irda_find_lsap_sel+0x1eb/0x630 [] irda_connect+0x828/0x12d0 [] SYSC_connect+0x22b/0x340 [] SyS_connect+0x9/0x10 [] do_syscall_64+0x1b3/0x4b0 [] entry_SYSCALL64_slow_path+0x25/0x25 ================================================================================ The bug seems to have been around since forever. There's more problems with missing error checks in iriap_init() (and indeed all of irda_init()), but that's a bigger problem that needs very careful review and testing. This patch will fix the most serious bug (as it's easily reached from unprivileged userspace). I have tested my patch with a reproducer. Signed-off-by: Vegard Nossum Signed-off-by: David S. Miller diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 4a7ae32a..1138eaf 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, self->magic = IAS_MAGIC; self->mode = mode; - if (mode == IAS_CLIENT) - iriap_register_lsap(self, slsap_sel, mode); + if (mode == IAS_CLIENT) { + if (iriap_register_lsap(self, slsap_sel, mode)) { + kfree(self); + return NULL; + } + } self->confirm = callback; self->priv = priv; -- cgit v0.10.2 From 54236ab09e9696a27baaae693c288920a26e8588 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 12 Aug 2016 09:50:51 +0200 Subject: net/sctp: always initialise sctp_ht_iter::start_fail sctp_transport_seq_start() does not currently clear iter->start_fail on success, but relies on it being zero when it is allocated (by seq_open_net()). 
This can be a problem in the following sequence: open() // allocates iter (and implicitly sets iter->start_fail = 0) read() - iter->start() // fails and sets iter->start_fail = 1 - iter->stop() // doesn't call sctp_transport_walk_stop() (correct) read() again - iter->start() // succeeds, but doesn't change iter->start_fail - iter->stop() // doesn't call sctp_transport_walk_stop() (wrong) We should initialize sctp_ht_iter::start_fail to zero if ->start() succeeds, otherwise it's possible that we leave an old value of 1 there, which will cause ->stop() to not call sctp_transport_walk_stop(), which causes all sorts of problems like not calling rcu_read_unlock() (and preempt_enable()), eventually leading to more warnings like this: BUG: sleeping function called from invalid context at mm/slab.h:388 in_atomic(): 0, irqs_disabled(): 0, pid: 16551, name: trinity-c2 Preemption disabled at:[] rhashtable_walk_start+0x46/0x150 [] preempt_count_add+0x1fb/0x280 [] _raw_spin_lock+0x12/0x40 [] rhashtable_walk_start+0x46/0x150 [] sctp_transport_walk_start+0x2f/0x60 [] sctp_transport_seq_start+0x4d/0x150 [] traverse+0x170/0x850 [] seq_read+0x7cc/0x1180 [] proc_reg_read+0xbc/0x180 [] do_loop_readv_writev+0x134/0x210 [] do_readv_writev+0x565/0x660 [] vfs_readv+0x67/0xa0 [] do_preadv+0x126/0x170 [] SyS_preadv+0xc/0x10 [] do_syscall_64+0x19c/0x410 [] return_from_SYSCALL_64+0x0/0x6a [] 0xffffffffffffffff Notice that this is a subtly different stacktrace from the one in commit 5fc382d875 ("net/sctp: terminate rhashtable walk correctly"). Cc: Xin Long Cc: Herbert Xu Cc: Eric W. Biederman Cc: Marcelo Ricardo Leitner Signed-off-by: Vegard Nossum Acked-By: Neil Horman Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 4cb5aed..ef8ba77 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) return ERR_PTR(err); } + iter->start_fail = 0; return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } -- cgit v0.10.2 From bc561632dddd5af0c4444d919f01cbf6d553aa0a Mon Sep 17 00:00:00 2001 From: Mike Manning Date: Fri, 12 Aug 2016 12:02:38 +0100 Subject: net: ipv6: Do not keep IPv6 addresses when IPv6 is disabled If IPv6 is disabled when the option is set to keep IPv6 addresses on link down, userspace is unaware of this as there is no such indication via netlink. The solution is to remove the IPv6 addresses in this case, which results in netlink messages indicating removal of addresses in the usual manner. This fix also makes the behavior consistent with the case of having IPv6 disabled first, which stops IPv6 addresses from being added. Fixes: f1705ec197e7 ("net: ipv6: Make address flushing on ifdown optional") Signed-off-by: Mike Manning Acked-by: David Ahern Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ab3e796..df8425f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3543,7 +3543,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) /* combine the user config with event to determine if permanent * addresses are to be removed from address hash table */ - keep_addr = !(how || _keep_addr <= 0); + keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6); /* Step 2: clear hash table */ for (i = 0; i < IN6_ADDR_HSIZE; i++) { @@ -3599,7 +3599,7 @@ restart: /* re-combine the user config with event to determine if permanent * addresses are to be removed from the interface list */ - keep_addr = (!how && _keep_addr > 0); + keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6); INIT_LIST_HEAD(&del_list); list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { -- cgit v0.10.2 From e20038724552cd05e351cd7d7526d646953d26b7 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 12 Aug 2016 16:10:32 +0200 Subject: macsec: fix lockdep splats when nesting devices Currently, trying to setup a vlan over a macsec device, or other combinations of devices, triggers a lockdep warning. Use netdev_lockdep_set_classes and ndo_get_lock_subclass, similar to what macvlan does. Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index dbd590a..2043e8c 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -270,6 +270,7 @@ struct macsec_dev { struct pcpu_secy_stats __percpu *stats; struct list_head secys; struct gro_cells gro_cells; + unsigned int nest_level; }; /** @@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, #define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) +static struct lock_class_key macsec_netdev_addr_lock_key; + static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); @@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev) return macsec_priv(dev)->real_dev->ifindex; } + +static int macsec_get_nest_level(struct net_device *dev) +{ + return macsec_priv(dev)->nest_level; +} + + static const struct net_device_ops macsec_netdev_ops = { .ndo_init = macsec_dev_init, .ndo_uninit = macsec_dev_uninit, @@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = { .ndo_start_xmit = macsec_start_xmit, .ndo_get_stats64 = macsec_get_stats64, .ndo_get_iflink = macsec_get_iflink, + .ndo_get_lock_subclass = macsec_get_nest_level, }; static const struct device_type macsec_type = { @@ -3050,10 +3061,12 @@ static void macsec_del_dev(struct macsec_dev *macsec) static void macsec_common_dellink(struct net_device *dev, struct list_head *head) { struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); + netdev_upper_dev_unlink(real_dev, dev); macsec_generation++; } @@ -3188,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev, dev_hold(real_dev); + macsec->nest_level = dev_get_nest_level(real_dev, netif_is_macsec) + 1; + netdev_lockdep_set_classes(dev); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key, + macsec_get_nest_level(dev)); + + err = netdev_upper_dev_link(real_dev, dev); + if (err < 0) + goto unregister; + /* need to be already registered so that ->init has run and * the MAC addr is set */ @@ 
-3200,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (rx_handler && sci_exists(real_dev, sci)) { err = -EBUSY; - goto unregister; + goto unlink; } err = macsec_add_dev(dev, sci, icv_len); if (err) - goto unregister; + goto unlink; if (data) macsec_changelink_common(dev, data); @@ -3220,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, del_dev: macsec_del_dev(macsec); +unlink: + netdev_upper_dev_unlink(real_dev, dev); unregister: unregister_netdevice(dev); return err; -- cgit v0.10.2 From 952fcfd08c8109951622579d0ae7b9cd6cafd688 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 12 Aug 2016 16:10:33 +0200 Subject: net: remove type_check from dev_get_nest_level() The idea for type_check in dev_get_nest_level() was to count the number of nested devices of the same type (currently, only macvlan or vlan devices). This prevented the false positive lockdep warning on configurations such as: eth0 <--- macvlan0 <--- vlan0 <--- macvlan1 However, this doesn't prevent a warning on a configuration such as: eth0 <--- macvlan0 <--- vlan0 eth1 <--- vlan1 <--- macvlan1 In this case, all the locks end up with a nesting subclass of 1, so lockdep thinks that there is still a deadlock: - in the first case we have (macvlan_netdev_addr_lock_key, 1) and then take (vlan_netdev_xmit_lock_key, 1) - in the second case, we have (vlan_netdev_xmit_lock_key, 1) and then take (macvlan_netdev_addr_lock_key, 1) By removing the linktype check in dev_get_nest_level() and always incrementing the nesting depth, lockdep considers this configuration valid. Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 2043e8c..351e701 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3201,7 +3201,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, dev_hold(real_dev); - macsec->nest_level = dev_get_nest_level(real_dev, netif_is_macsec) + 1; + macsec->nest_level = dev_get_nest_level(real_dev) + 1; netdev_lockdep_set_classes(dev); lockdep_set_class_and_subclass(&dev->addr_list_lock, &macsec_netdev_addr_lock_key, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cd9b538..3234fcd 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; - vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; + vlan->nest_level = dev_get_nest_level(lowerdev) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 076df53..3a788bf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3891,8 +3891,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev, extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); -int dev_get_nest_level(struct net_device *dev, - bool (*type_check)(const struct net_device *dev)); +int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 82a116b..8de138d 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev) if (err < 0) goto 
out_uninit_mvrp; - vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; + vlan->nest_level = dev_get_nest_level(real_dev) + 1; err = register_netdevice(dev); if (err < 0) goto out_uninit_mvrp; diff --git a/net/core/dev.c b/net/core/dev.c index 4ce07dc..dd6ce59 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6045,8 +6045,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev, EXPORT_SYMBOL(netdev_lower_dev_get_private); -int dev_get_nest_level(struct net_device *dev, - bool (*type_check)(const struct net_device *dev)) +int dev_get_nest_level(struct net_device *dev) { struct net_device *lower = NULL; struct list_head *iter; @@ -6056,15 +6055,12 @@ int dev_get_nest_level(struct net_device *dev, ASSERT_RTNL(); netdev_for_each_lower_dev(dev, lower, iter) { - nest = dev_get_nest_level(lower, type_check); + nest = dev_get_nest_level(lower); if (max_nest < nest) max_nest = nest; } - if (type_check(dev)) - max_nest++; - - return max_nest; + return max_nest + 1; } EXPORT_SYMBOL(dev_get_nest_level); -- cgit v0.10.2 From d1669c8288a2c86daa6fd59c7fb938c08589d053 Mon Sep 17 00:00:00 2001 From: Markus Heiser Date: Fri, 5 Aug 2016 11:19:43 +0200 Subject: doc-rst: customize RTD theme, drop padding of inline literal Remove the distracting (left/right) padding of inline literals. (HTML ). Requested and discussed in [1]. [1] http://www.spinics.net/lists/linux-media/msg103991.html Signed-off-by: Markus Heiser Acked-by: Hans Verkuil Signed-off-by: Jonathan Corbet diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css index 3a2ac4b..e88461c 100644 --- a/Documentation/sphinx-static/theme_overrides.css +++ b/Documentation/sphinx-static/theme_overrides.css @@ -42,11 +42,12 @@ caption a.headerlink { opacity: 0; } caption a.headerlink:hover { opacity: 1; } - /* inline literal: drop the borderbox and red color */ + /* inline literal: drop the borderbox, padding and red color */ code, .rst-content tt, .rst-content code { color: inherit; border: none; + padding: unset; background: inherit; font-size: 85%; } -- cgit v0.10.2 From ce8cb803d8b90458495f23606c706f0c0c857cdc Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 12 Aug 2016 18:40:23 +0200 Subject: i2c: mux: demux-pinctrl: properly roll back when adding adapter fails We also need to revert the dynamic OF change, so we get a consistent state again. Otherwise, we might have two devices enabled e.g. after pinctrl setup fails. 
Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang Cc: stable@kernel.org diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 8de073a..215ac87 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); if (!adap) { ret = -ENODEV; - goto err; + goto err_with_revert; } p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); @@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne err_with_put: i2c_put_adapter(adap); + err_with_revert: + of_changeset_revert(&priv->chan[new_chan].chgset); err: dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); return ret; -- cgit v0.10.2 From 4d01d88019261d05ec3bff5f1a6013393faa3b9e Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 10 Aug 2016 13:37:18 -0700 Subject: i2c: cros-ec-tunnel: Fix usage of cros_ec_cmd_xfer() cros_ec_cmd_xfer returns success status if the command transport completes successfully, but the execution result is incorrectly ignored. In many cases, the execution result is assumed to be successful, leading to ignored errors and operating on uninitialized data. We've recently introduced the cros_ec_cmd_xfer_status() helper to avoid these problems. Let's use it. [Regarding the 'Fixes' tag; there is significant refactoring since the driver's introduction, but the underlying logical error exists throughout I believe] Fixes: 9d230c9e4f4e ("i2c: ChromeOS EC tunnel driver") Cc: # 9798ac6d32c1 mfd: cros_ec: Add cros_ec_cmd_xfer_status() helper Signed-off-by: Brian Norris Reviewed-by: Javier Martinez Canillas Reviewed-by: Guenter Roeck Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index a0d95ff..2d5ff863 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], msg->outsize = request_len; msg->insize = response_len; - result = cros_ec_cmd_xfer(bus->ec, msg); + result = cros_ec_cmd_xfer_status(bus->ec, msg); if (result < 0) { dev_err(dev, "Error transferring EC i2c message %d\n", result); goto exit; -- cgit v0.10.2 From 97ccd4af120c6ee183f424f17672870809bd6efd Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Thu, 4 Aug 2016 02:38:44 +0300 Subject: i2c: ocores: add missed clk_disable_unprepare() on failure paths clk_disable_unprepare() is missed on failure paths in ocores_i2c_probe(). Found by Linux Driver Verification project (linuxtesting.org). 
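A minimal sketch of the probe-time unwinding the fix below introduces, with illustrative helpers for everything except the common clock API: once the clock has been prepared and enabled, every later failure has to funnel through a label that disables it again.

#include <linux/clk.h>

static int example_init_hw(void)          { return 0; } /* illustrative */
static int example_register_adapter(void) { return 0; } /* illustrative */

static int example_probe_body(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = example_init_hw();
	if (ret)
		goto err_clk;		/* clock is on, turn it off again */

	ret = example_register_adapter();
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(clk);
	return ret;
}

Funnelling every late failure through the single err_clk label is what keeps new error paths from forgetting the disable, which is exactly the class of leak being fixed here.
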
Signed-off-by: Alexey Khoroshilov Acked-by: Peter Korsgaard Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index dfa7a4b..ac88a52 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, if (!clock_frequency_present) { dev_err(&pdev->dev, "Missing required parameter 'opencores,ip-clock-frequency'\n"); + clk_disable_unprepare(i2c->clk); return -ENODEV; } i2c->ip_clock_khz = clock_frequency / 1000; @@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", i2c->reg_io_width); - return -EINVAL; + ret = -EINVAL; + goto err_clk; } } ret = ocores_init(&pdev->dev, i2c); if (ret) - return ret; + goto err_clk; init_waitqueue_head(&i2c->wait); ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, pdev->name, i2c); if (ret) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); - return ret; + goto err_clk; } /* hook up driver to tree */ @@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev) ret = i2c_add_adapter(&i2c->adap); if (ret) { dev_err(&pdev->dev, "Failed to add adapter\n"); - return ret; + goto err_clk; } /* add in known devices to the bus */ @@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev) } return 0; + +err_clk: + clk_disable_unprepare(i2c->clk); + return ret; } static int ocores_i2c_remove(struct platform_device *pdev) -- cgit v0.10.2 From 434f14e745442a4230a40cf47f84deac1028f64f Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Wed, 3 Aug 2016 16:58:26 +0200 Subject: i2c: at91: fix support of the "alternative command" feature The "alternative command" feature was introduced with sama5d2 SoCs. Its purpose is to let the hardware i2c controller automatically send the STOP condition on the i2c bus at the end of a data transfer. Without this feature, the i2c driver has to write the 'STOP' bit into the Control Register so the hardware i2c controller is triggered to send the STOP condition on the bus. Using the "alternative command" feature requires to set the transfer data length into the 8bit DATAL field of the Alternative Command Register. Hence only data transfers up to 255 bytes can take advantage of the "alternative command" feature. For greater data transfer sizes, the driver should use the previous implementation, when the "alternative command" support was not implemented yet. 
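A minimal sketch of the length check described above (the constant name is illustrative, the 8-bit DATAL limit is from the text): only transfers of 1 to 255 bytes can be described to the hardware, so the decision is made once per transfer and the manual-STOP path is kept for everything else.

#include <linux/types.h>

#define EXAMPLE_MAX_ALT_CMD_DATA	256	/* DATAL is an 8-bit field */

static bool example_can_use_alt_cmd(unsigned int len)
{
	return len > 0 && len < EXAMPLE_MAX_ALT_CMD_DATA;
}

Caching the result of this check per transfer, rather than re-reading the SoC capability flag, is what lets the interrupt and DMA handlers know whether they still have to write the STOP bit themselves.
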
Signed-off-by: Cyrille Pitchen Signed-off-by: Ludovic Desroches Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index f233726..1bb97f6 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -38,6 +38,7 @@ #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ #define AUTOSUSPEND_TIMEOUT 2000 +#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256 /* AT91 TWI register definitions */ #define AT91_TWI_CR 0x0000 /* Control Register */ @@ -141,6 +142,7 @@ struct at91_twi_dev { unsigned twi_cwgr_reg; struct at91_twi_pdata *pdata; bool use_dma; + bool use_alt_cmd; bool recv_len_abort; u32 fifo_size; struct at91_twi_dma dma; @@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) /* send stop when last byte has been written */ if (--dev->buf_len == 0) - if (!dev->pdata->has_alt_cmd) + if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); @@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data) * we just have to enable TXCOMP one. */ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); - if (!dev->pdata->has_alt_cmd) + if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); } @@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) } /* send stop if second but last byte has been read */ - if (!dev->pdata->has_alt_cmd && dev->buf_len == 1) + if (!dev->use_alt_cmd && dev->buf_len == 1) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); @@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data) dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), dev->buf_len, DMA_FROM_DEVICE); - if (!dev->pdata->has_alt_cmd) { + if (!dev->use_alt_cmd) { /* The last two bytes have to be read without using dma */ dev->buf += dev->buf_len - 2; dev->buf_len = 2; @@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev) struct dma_chan *chan_rx = dma->chan_rx; size_t buf_len; - buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2; + buf_len = (dev->use_alt_cmd) ? 
dev->buf_len : dev->buf_len - 2; dma->direction = DMA_FROM_DEVICE; /* Keep in mind that we won't use dma to read the last two bytes */ @@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) unsigned start_flags = AT91_TWI_START; /* if only one byte is to be read, immediately stop transfer */ - if (!has_alt_cmd && dev->buf_len <= 1 && + if (!dev->use_alt_cmd && dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN)) start_flags |= AT91_TWI_STOP; at91_twi_write(dev, AT91_TWI_CR, start_flags); @@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) int ret; unsigned int_addr_flag = 0; struct i2c_msg *m_start = msg; - bool is_read, use_alt_cmd = false; + bool is_read; dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); @@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) at91_twi_write(dev, AT91_TWI_IADR, internal_address); } + dev->use_alt_cmd = false; is_read = (m_start->flags & I2C_M_RD); if (dev->pdata->has_alt_cmd) { - if (m_start->len > 0) { + if (m_start->len > 0 && + m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); at91_twi_write(dev, AT91_TWI_ACR, AT91_TWI_ACR_DATAL(m_start->len) | ((is_read) ? AT91_TWI_ACR_DIR : 0)); - use_alt_cmd = true; + dev->use_alt_cmd = true; } else { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); } @@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag | - ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); + ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); dev->buf_len = m_start->len; dev->buf = m_start->buf; -- cgit v0.10.2 From 8d263be8716da67bd8746e266c1934cb9c9933cc Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Wed, 3 Aug 2016 14:03:08 +0200 Subject: i2c: bcm-iproc: Use complete() instead of complete_all() There is only one waiter for the completion, therefore there is no need to use complete_all(). Let's make that clear by using complete() instead of complete_all(). The usage pattern of the completion is: bcm_iproc_i2c_xfer_single_msg() reinit_completion() ... (activate the transfer) ... wait_for_completion_timeout() Signed-off-by: Daniel Wagner Acked-by: Ray Jui Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 19c8438..95f7cac 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) if (status & BIT(IS_M_START_BUSY_SHIFT)) { iproc_i2c->xfer_is_done = 1; - complete_all(&iproc_i2c->done); + complete(&iproc_i2c->done); } writel(status, iproc_i2c->base + IS_OFFSET); -- cgit v0.10.2 From 752c3899f97c469efea42b1ad6de66d18a21ce09 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Wed, 3 Aug 2016 14:03:09 +0200 Subject: i2c: bcm-kona: Use complete() instead of complete_all() There is only one waiter for the completion, therefore there is no need to use complete_all(). Let's make that clear by using complete() instead of complete_all(). The usage pattern of the completion is: bcm_kona_send_i2c_cmd() reinit_completion() ... bcm_kona_i2c_send_cmd_to_ctrl() ... 
wait_for_completion_timeout() Signed-off-by: Daniel Wagner Acked-by: Ray Jui Reviewed-by: Tim Kryger Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index ac9f476..f987432 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) dev->base + TXFCR_OFFSET); writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); - complete_all(&dev->done); + complete(&dev->done); return IRQ_HANDLED; } -- cgit v0.10.2 From fea03a6ab1d8e456c0319c194150621629504f80 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Wed, 3 Aug 2016 14:03:10 +0200 Subject: i2c: brcmstb: Use complete() instead of complete_all() There is only one waiter for the completion, therefore there is no need to use complete_all(). Let's make that clear by using complete() instead of complete_all(). The usage pattern of the completion is: brcmstb_send_i2c_cmd() reinit_completion() ... /* initiate transfer by setting iic_enable */ ... brcmstb_i2c_wait_for_completion() Signed-off-by: Daniel Wagner Reviewed-by: Kamal Dasu Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 3f5a4d7..385b57b 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid) return IRQ_NONE; brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); - complete_all(&dev->done); + complete(&dev->done); dev_dbg(dev->device, "isr handled"); return IRQ_HANDLED; -- cgit v0.10.2 From 0268263f0cf2239eb3a747936d8d36ef5f5e1688 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Wed, 3 Aug 2016 14:03:11 +0200 Subject: i2c: meson: Use complete() instead of complete_all() There is only one waiter for the completion, therefore there is no need to use complete_all(). Let's make that clear by using complete() instead of complete_all(). The usage pattern of the completion is: meson_i2c_xfer_msg() reinit_completion() ... /* Start the transfer */ ... wait_for_completion_timeout() Signed-off-by: Daniel Wagner Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 71d3929..76e2898 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c) meson_i2c_add_token(i2c, TOKEN_STOP); } else { i2c->state = STATE_IDLE; - complete_all(&i2c->done); + complete(&i2c->done); } } @@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) dev_dbg(i2c->dev, "error bit set\n"); i2c->error = -ENXIO; i2c->state = STATE_IDLE; - complete_all(&i2c->done); + complete(&i2c->done); goto out; } @@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) break; case STATE_STOP: i2c->state = STATE_IDLE; - complete_all(&i2c->done); + complete(&i2c->done); break; case STATE_IDLE: break; -- cgit v0.10.2 From 52012619e5a2ca0491426c3712fb9054692d4a3c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 14 Aug 2016 23:44:21 +0300 Subject: ringtest: test build fix Recent changes to ptr_ring broke the ringtest which lacks a likely() stub. Fix it up. Fixes: 982fb490c298896d15e9323a882f34a57c11ff56 ("ptr_ring: support zero length ring") Signed-off-by: Michael S. 
Tsirkin diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c index 68e4f9f..bd2ad1d 100644 --- a/tools/virtio/ringtest/ptr_ring.c +++ b/tools/virtio/ringtest/ptr_ring.c @@ -13,6 +13,7 @@ #define cache_line_size() SMP_CACHE_BYTES #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES))) #define unlikely(x) (__builtin_expect(!!(x), 0)) +#define likely(x) (__builtin_expect(!!(x), 1)) #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) typedef pthread_spinlock_t spinlock_t; -- cgit v0.10.2 From 21bc54fc0cdc31de72b57d2b3c79cf9c2b83cf39 Mon Sep 17 00:00:00 2001 From: Gerard Garcia Date: Wed, 10 Aug 2016 17:24:34 +0200 Subject: vhost/vsock: drop space available check for TX vq Remove unnecessary use of enable/disable callback notifications and the incorrect more space available check. The virtio_transport_tx_work handles when the TX virtqueue has more buffers available. Signed-off-by: Gerard Garcia Acked-by: Stefan Hajnoczi Signed-off-by: Michael S. Tsirkin diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 699dfab..936d7ee 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work) vq = vsock->vqs[VSOCK_VQ_TX]; - /* Avoid unnecessary interrupts while we're processing the ring */ - virtqueue_disable_cb(vq); - for (;;) { struct virtio_vsock_pkt *pkt; struct scatterlist hdr, buf, *sgs[2]; @@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work) spin_lock_bh(&vsock->send_pkt_list_lock); if (list_empty(&vsock->send_pkt_list)) { spin_unlock_bh(&vsock->send_pkt_list_lock); - virtqueue_enable_cb(vq); break; } @@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work) } ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL); + /* Usually this means that there is no more space available in + * the vq + */ if (ret < 0) { spin_lock_bh(&vsock->send_pkt_list_lock); list_add(&pkt->list, &vsock->send_pkt_list); spin_unlock_bh(&vsock->send_pkt_list_lock); - - if (!virtqueue_enable_cb(vq) && ret == -ENOSPC) - continue; /* retry now that we have more space */ break; } -- cgit v0.10.2 From 446374d7c7f89603d8151b56824e2cac85ed8e0d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 15 Aug 2016 04:28:12 +0300 Subject: vhost/test: fix after swiotlb changes Signed-off-by: Michael S. Tsirkin diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 388eec4..97fb2f8 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n) { void *priv = NULL; long err; - struct vhost_memory *memory; + struct vhost_umem *umem; mutex_lock(&n->dev.mutex); err = vhost_dev_check_owner(&n->dev); if (err) goto done; - memory = vhost_dev_reset_owner_prepare(); - if (!memory) { + umem = vhost_dev_reset_owner_prepare(); + if (!umem) { err = -ENOMEM; goto done; } vhost_test_stop(n, &priv); vhost_test_flush(n); - vhost_dev_reset_owner(&n->dev, memory); + vhost_dev_reset_owner(&n->dev, umem); done: mutex_unlock(&n->dev.mutex); return err; -- cgit v0.10.2 From 6be3ffaa0e15c64f560904b025f5c50bef5886f9 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 15 Aug 2016 04:50:55 +0300 Subject: tools/virtio: add dma stubs Fixes build after recent IOMMU-related changes, mustly by adding more stubs. Signed-off-by: Michael S. 
Tsirkin diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h index 4f93af8..18601f6 100644 --- a/tools/virtio/linux/dma-mapping.h +++ b/tools/virtio/linux/dma-mapping.h @@ -14,4 +14,20 @@ enum dma_data_direction { DMA_NONE = 3, }; +#define dma_alloc_coherent(d, s, hp, f) ({ \ + void *__dma_alloc_coherent_p = kmalloc((s), (f)); \ + *(hp) = (unsigned long)__dma_alloc_coherent_p; \ + __dma_alloc_coherent_p; \ +}) + +#define dma_free_coherent(d, s, p, h) kfree(p) + +#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o)) + +#define dma_map_single(d, p, s, dir) (virt_to_phys(p)) +#define dma_mapping_error(...) (0) + +#define dma_unmap_single(...) do { } while (0) +#define dma_unmap_page(...) do { } while (0) + #endif diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 0338499..d9554fc 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h @@ -20,7 +20,9 @@ #define PAGE_SIZE getpagesize() #define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK) +typedef unsigned long long phys_addr_t; typedef unsigned long long dma_addr_t; typedef size_t __kernel_size_t; typedef unsigned int __wsum; @@ -57,6 +59,11 @@ static inline void *kzalloc(size_t s, gfp_t gfp) return p; } +static inline void *alloc_pages_exact(size_t s, gfp_t gfp) +{ + return kmalloc(s, gfp); +} + static inline void kfree(void *p) { if (p >= __kfree_ignore_start && p < __kfree_ignore_end) @@ -64,6 +71,11 @@ static inline void kfree(void *p) free(p); } +static inline void free_pages_exact(void *p, size_t s) +{ + kfree(p); +} + static inline void *krealloc(void *p, size_t s, gfp_t gfp) { return realloc(p, s); @@ -105,6 +117,8 @@ static inline void free_page(unsigned long addr) #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) +#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n")) + #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ diff --git a/tools/virtio/linux/slab.h b/tools/virtio/linux/slab.h index 81baeac..7e1c119 100644 --- a/tools/virtio/linux/slab.h +++ b/tools/virtio/linux/slab.h @@ -1,2 +1,6 @@ #ifndef LINUX_SLAB_H +#define GFP_KERNEL 0 +#define GFP_ATOMIC 0 +#define __GFP_NOWARN 0 +#define __GFP_ZERO 0 #endif diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h index ee125e7..9377c8b 100644 --- a/tools/virtio/linux/virtio.h +++ b/tools/virtio/linux/virtio.h @@ -3,8 +3,12 @@ #include #include +struct device { + void *parent; +}; + struct virtio_device { - void *dev; + struct device dev; u64 features; }; diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h index 57a6964..9ba1181 100644 --- a/tools/virtio/linux/virtio_config.h +++ b/tools/virtio/linux/virtio_config.h @@ -40,6 +40,19 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev, #define virtio_has_feature(dev, feature) \ (__virtio_test_bit((dev), feature)) +/** + * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * @vdev: the device + */ +static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +{ + /* + * Note the reverse polarity of the quirk feature (compared to most + * other features), this is for compatibility with legacy systems. 
+ */ + return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); +} + static inline bool virtio_is_little_endian(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || -- cgit v0.10.2 From 694d0d0bb2030d2e36df73e2d23d5770511dbc8d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 14 Aug 2016 19:11:36 -0700 Subject: Linux 4.8-rc2 diff --git a/Makefile b/Makefile index 8c504f3..5c18baa 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Psychotic Stoned Sheep # *DOCUMENTATION* -- cgit v0.10.2 From 4cf0b354d92ee2c642532ee39e330f8f580fd985 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 12 Aug 2016 12:03:52 +0200 Subject: rhashtable: avoid large lock-array allocations Sander reports following splat after netfilter nat bysrc table got converted to rhashtable: swapper/0: page allocation failure: order:3, mode:0x2084020(GFP_ATOMIC|__GFP_COMP) CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.8.0-rc1 [..] [] warn_alloc_failed+0xdd/0x140 [] __alloc_pages_nodemask+0x3e1/0xcf0 [] alloc_pages_current+0x8d/0x110 [] kmalloc_order+0x1f/0x70 [] __kmalloc+0x129/0x140 [] bucket_table_alloc+0xc1/0x1d0 [] rhashtable_insert_rehash+0x5d/0xe0 [] nf_nat_setup_info+0x2ef/0x400 The failure happens when allocating the spinlock array. Even with GFP_KERNEL its unlikely for such a large allocation to succeed. Thomas Graf pointed me at inet_ehash_locks_alloc(), so in addition to adding NOWARN for atomic allocations this also makes the bucket-array sizing more conservative. In commit 095dc8e0c3686 ("tcp: fix/cleanup inet_ehash_locks_alloc()"), Eric Dumazet says: "Budget 2 cache lines per cpu worth of 'spinlocks'". IOW, consider size needed by a single spinlock when determining number of locks per cpu. So with 64 byte per cacheline and 4 byte per spinlock this gives 32 locks per cpu. Resulting size of the lock-array (sizeof(spinlock) == 4): cpus: 1 2 4 8 16 32 64 old: 1k 1k 4k 8k 16k 16k 16k new: 128 256 512 1k 2k 4k 8k 8k allocation should have decent chance of success even with GFP_ATOMIC, and should not fail with GFP_KERNEL. With 72-byte spinlock (LOCKDEP): cpus : 1 2 old: 9k 18k new: ~2k ~4k Reported-by: Sander Eikelenboom Suggested-by: Thomas Graf Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 5d845ff..42acd81 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -30,7 +30,7 @@ #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U -#define BUCKET_LOCKS_PER_CPU 128UL +#define BUCKET_LOCKS_PER_CPU 32UL static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, @@ -70,7 +70,7 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, unsigned int nr_pcpus = num_possible_cpus(); #endif - nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); + nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL); size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); /* Never allocate more than 0.5 locks per bucket */ @@ -83,6 +83,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, tbl->locks = vmalloc(size * sizeof(spinlock_t)); else #endif + if (gfp != GFP_KERNEL) + gfp |= __GFP_NOWARN | __GFP_NORETRY; + tbl->locks = kmalloc_array(size, sizeof(spinlock_t), gfp); if (!tbl->locks) -- cgit v0.10.2 From eb8fc32354aa77678dc6e7950a8f0c79cace204f Mon Sep 17 00:00:00 2001 From: Vincent Date: Sun, 14 Aug 2016 15:38:29 +0200 Subject: mlxsw: spectrum_router: Fix use after free MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In mlxsw_sp_router_fib4_add_info_destroy(), the fib_entry pointer is used after it has been freed by mlxsw_sp_fib_entry_destroy(). Use a temporary variable to fix this. Fixes: 61c503f976b5449e ("mlxsw: spectrum_router: Implement fib4 add/del switchdev obj ops") Signed-off-by: Vincent Stehlé Cc: Jiri Pirko Acked-by: Ido Schimmel Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 81418d6..90bb93b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) const struct mlxsw_sp_router_fib4_add_info *info = data; struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; + struct mlxsw_sp_vr *vr = fib_entry->vr; mlxsw_sp_fib_entry_destroy(fib_entry); - mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); kfree(info); } -- cgit v0.10.2 From d9853490176c88fff71d03ec9a174b77011f5026 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Thu, 28 Jul 2016 11:50:48 +0200 Subject: drm/etnaviv: take GPU lock later in the submit process Both the fence and event alloc are safe to be done without holding the GPU lock, as they either don't need any locking (fences) or are protected by their own lock (events). This solves a bad locking interaction between the submit path and the recover worker. If userspace manages to exhaust all available events while the GPU is hung, the submit will wait for events to become available holding the GPU lock. The recover worker waits for this lock to become available before trying to recover the GPU which frees up the allocated events. Essentially both paths are deadlocked until the submit path times out waiting for available events, failing the submit that could otherwise be handled just fine if the recover worker had the chance to bring the GPU back in a working state. 
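A rough sketch of the resulting ordering, where every name except the mutex API is illustrative: the steps that can block or fail are finished before the GPU lock is taken, so a recovery worker contending for the same lock is never starved by a submitter waiting inside it.

#include <linux/mutex.h>
#include <linux/errno.h>

struct example_gpu {
	struct mutex lock;
	/* ... */
};

/* Illustrative helpers; the event allocator may wait and has its own lock. */
static int  example_event_alloc(struct example_gpu *gpu);
static void example_event_free(struct example_gpu *gpu, int event);
static void *example_fence_alloc(struct example_gpu *gpu);
static void example_queue_to_hw(struct example_gpu *gpu, int event, void *fence);

static int example_submit(struct example_gpu *gpu)
{
	void *fence;
	int event;

	event = example_event_alloc(gpu);	/* before taking the lock */
	if (event < 0)
		return event;

	fence = example_fence_alloc(gpu);	/* before taking the lock */
	if (!fence) {
		example_event_free(gpu, event);
		return -ENOMEM;
	}

	mutex_lock(&gpu->lock);			/* held only for the queueing */
	example_queue_to_hw(gpu, event, fence);
	mutex_unlock(&gpu->lock);

	return 0;
}
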
Signed-off-by: Lucas Stach Reviewed-by: Christian Gmeiner diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 87ef341..b382cf5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, if (ret < 0) return ret; - mutex_lock(&gpu->lock); - /* * TODO * @@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, if (unlikely(event == ~0U)) { DRM_ERROR("no free event\n"); ret = -EBUSY; - goto out_unlock; + goto out_pm_put; } fence = etnaviv_gpu_fence_alloc(gpu); if (!fence) { event_free(gpu, event); ret = -ENOMEM; - goto out_unlock; + goto out_pm_put; } + mutex_lock(&gpu->lock); + gpu->event[event].fence = fence; submit->fence = fence->seqno; gpu->active_fence = submit->fence; @@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, hangcheck_timer_reset(gpu); ret = 0; -out_unlock: mutex_unlock(&gpu->lock); +out_pm_put: etnaviv_gpu_pm_put(gpu); return ret; -- cgit v0.10.2 From 802934b2cfde463b72cc1b9bc1c081895a90be53 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 5 Aug 2016 12:29:06 -0400 Subject: dm round robin: do not use this_cpu_ptr() without having preemption disabled Use local_irq_save() to disable preemption before calling this_cpu_ptr(). Reported-by: Benjamin Block Fixes: b0b477c7e0dd ("dm round robin: use percpu 'repeat_count' and 'current_path'") Cc: stable@vger.kernel.org # 4.6+ Suggested-by: Jens Axboe Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 4ace1da..6c25213 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c @@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) struct path_info *pi = NULL; struct dm_path *current_path = NULL; + local_irq_save(flags); current_path = *this_cpu_ptr(s->current_path); if (current_path) { percpu_counter_dec(&s->repeat_count); - if (percpu_counter_read_positive(&s->repeat_count) > 0) + if (percpu_counter_read_positive(&s->repeat_count) > 0) { + local_irq_restore(flags); return current_path; + } } - spin_lock_irqsave(&s->lock, flags); + spin_lock(&s->lock); if (!list_empty(&s->valid_paths)) { pi = list_entry(s->valid_paths.next, struct path_info, list); list_move_tail(&pi->list, &s->valid_paths); -- cgit v0.10.2 From 0a83df6c8cacafbefc5b56b2fbcb92b0d75b744b Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 15 Jul 2016 17:30:20 -0400 Subject: dm crypt: increase mempool reserve to better support swapping Increase mempool size from 16 to 64 entries. This increase improves swap on dm-crypt performance. When swapping to dm-crypt, all available memory is temporarily exhausted and dm-crypt can only use the mempool reserve. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4e9784b..eedba67 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -181,7 +181,7 @@ struct crypt_config { u8 key[0]; }; -#define MIN_IOS 16 +#define MIN_IOS 64 static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); -- cgit v0.10.2 From add125054b8727103631dce116361668436ef6a7 Mon Sep 17 00:00:00 2001 From: Gavin Li Date: Fri, 12 Aug 2016 00:52:56 -0700 Subject: cdc-acm: fix wrong pipe type on rx interrupt xfers This fixes the "BOGUS urb xfer" warning logged by usb_submit_urb(). 
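The underlying rule the fix below applies, sketched with illustrative buffer and callback names: the pipe handed to the URB has to match the endpoint's transfer type, so interrupt endpoints get an interrupt pipe (and an interval) while bulk endpoints get a bulk pipe.

#include <linux/usb.h>

static void example_fill_read_urb(struct urb *urb, struct usb_device *udev,
				  const struct usb_endpoint_descriptor *epread,
				  void *buf, int len,
				  usb_complete_t complete_fn, void *context)
{
	if (usb_endpoint_xfer_int(epread))
		usb_fill_int_urb(urb, udev,
				 usb_rcvintpipe(udev, epread->bEndpointAddress),
				 buf, len, complete_fn, context,
				 epread->bInterval);
	else
		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, epread->bEndpointAddress),
				  buf, len, complete_fn, context);
}

Building the pipe at fill time from the endpoint descriptor, instead of caching a single precomputed bulk pipe, removes the type mismatch that usb_submit_urb() warns about.
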
From 802934b2cfde463b72cc1b9bc1c081895a90be53 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 5 Aug 2016 12:29:06 -0400
Subject: dm round robin: do not use this_cpu_ptr() without having preemption disabled

Use local_irq_save() to disable preemption before calling
this_cpu_ptr().

Reported-by: Benjamin Block
Fixes: b0b477c7e0dd ("dm round robin: use percpu 'repeat_count' and 'current_path'")
Cc: stable@vger.kernel.org # 4.6+
Suggested-by: Jens Axboe
Signed-off-by: Mike Snitzer

diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 4ace1da..6c25213 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
         struct path_info *pi = NULL;
         struct dm_path *current_path = NULL;
 
+        local_irq_save(flags);
         current_path = *this_cpu_ptr(s->current_path);
         if (current_path) {
                 percpu_counter_dec(&s->repeat_count);
-                if (percpu_counter_read_positive(&s->repeat_count) > 0)
+                if (percpu_counter_read_positive(&s->repeat_count) > 0) {
+                        local_irq_restore(flags);
                         return current_path;
+                }
         }
 
-        spin_lock_irqsave(&s->lock, flags);
+        spin_lock(&s->lock);
         if (!list_empty(&s->valid_paths)) {
                 pi = list_entry(s->valid_paths.next, struct path_info, list);
                 list_move_tail(&pi->list, &s->valid_paths);
-- cgit v0.10.2

From 0a83df6c8cacafbefc5b56b2fbcb92b0d75b744b Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Fri, 15 Jul 2016 17:30:20 -0400
Subject: dm crypt: increase mempool reserve to better support swapping

Increase mempool size from 16 to 64 entries.  This increase improves
swap on dm-crypt performance.

When swapping to dm-crypt, all available memory is temporarily
exhausted and dm-crypt can only use the mempool reserve.

Signed-off-by: Mikulas Patocka
Signed-off-by: Mike Snitzer

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e9784b..eedba67 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -181,7 +181,7 @@ struct crypt_config {
         u8 key[0];
 };
 
-#define MIN_IOS 16
+#define MIN_IOS 64
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
-- cgit v0.10.2

From add125054b8727103631dce116361668436ef6a7 Mon Sep 17 00:00:00 2001
From: Gavin Li
Date: Fri, 12 Aug 2016 00:52:56 -0700
Subject: cdc-acm: fix wrong pipe type on rx interrupt xfers

This fixes the "BOGUS urb xfer" warning logged by usb_submit_urb().

Signed-off-by: Gavin Li
Acked-by: Oliver Neukum
Cc: stable
Signed-off-by: Greg Kroah-Hartman

diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7191230..0f3f62e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1354,7 +1354,6 @@ made_compressed_probe:
         spin_lock_init(&acm->write_lock);
         spin_lock_init(&acm->read_lock);
         mutex_init(&acm->mutex);
-        acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
         acm->is_int_ep = usb_endpoint_xfer_int(epread);
         if (acm->is_int_ep)
                 acm->bInterval = epread->bInterval;
@@ -1394,14 +1393,14 @@ made_compressed_probe:
                 urb->transfer_dma = rb->dma;
                 if (acm->is_int_ep) {
                         usb_fill_int_urb(urb, acm->dev,
-                                         acm->rx_endpoint,
+                                         usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
                                          rb->base,
                                          acm->readsize,
                                          acm_read_bulk_callback, rb,
                                          acm->bInterval);
                 } else {
                         usb_fill_bulk_urb(urb, acm->dev,
-                                          acm->rx_endpoint,
+                                          usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
                                           rb->base,
                                           acm->readsize,
                                           acm_read_bulk_callback, rb);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 05ce308..1f1eabf 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
         struct acm_rb read_buffers[ACM_NR];
         struct acm_wb *putbuffer;               /* for acm_tty_put_char() */
         int rx_buflimit;
-        int rx_endpoint;
         spinlock_t read_lock;
         int write_used;                         /* number of non-empty write buffers */
         int transmitting;
-- cgit v0.10.2

From 49a7f01064a94c4fd6afb6b4f7e3ccc37d0edf99 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Sat, 13 Aug 2016 01:12:10 -0300
Subject: perf jitdump: Add the right header to get the major()/minor() definitions

Noticed on Fedora Rawhide:

  $ gcc --version
  gcc (GCC) 6.1.1 20160721 (Red Hat 6.1.1-4)
  $ rpm -q glibc
  glibc-2.24.90-1.fc26.x86_64

  $ CC /tmp/build/perf/util/jitdump.o
  util/jitdump.c: In function 'jit_repipe_code_load':
  util/jitdump.c:428:2: error: '__major_from_sys_types' is deprecated:
  In the GNU C Library, `major' is defined by <sys/sysmacros.h>. For
  historical compatibility, it is currently defined by <sys/types.h> as
  well, but we plan to remove this soon. To use `major', include
  <sys/sysmacros.h> directly. If you did not intend to use a
  system-defined macro `major', you should #undef it after including
  <sys/types.h>. [-Werror=deprecated-declarations]
    event->mmap2.maj = major(st.st_dev);
    ^~~~~
  In file included from /usr/include/features.h:397:0,
                   from /usr/include/sys/types.h:25,
                   from util/jitdump.c:1:
  /usr/include/sys/sysmacros.h:87:1: note: declared here
   __SYSMACROS_DEFINE_MAJOR (__SYSMACROS_FST_IMPL_TEMPL)

Fix it following that recommendation.

Cc: Adrian Hunter
Cc: David Ahern
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Stephane Eranian
Cc: Wang Nan
Link: http://lkml.kernel.org/n/tip-3majvd0adhfr25rvx4v5e9te@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo

diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 9f3305f..95f0884 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -1,3 +1,4 @@
+#include <sys/sysmacros.h>
 #include <sys/types.h>
 #include
 #include
-- cgit v0.10.2
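The userspace side of the rule behind this fix: with current glibc, major()
and minor() are provided by <sys/sysmacros.h>, and code that only includes
<sys/types.h> now gets a deprecation warning (an error under -Werror). A
minimal standalone program, separate from the perf sources, showing the
intended include:

#include <sys/sysmacros.h>      /* major(), minor() on modern glibc */
#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        struct stat st;
        const char *path = argc > 1 ? argv[1] : "/dev/null";

        if (stat(path, &st) != 0) {
                perror("stat");
                return 1;
        }
        printf("%s: dev %u:%u\n", path, major(st.st_dev), minor(st.st_dev));
        return 0;
}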
From 34276bb062b8449b3b0a208c9b848a1a27920075 Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Mon, 15 Aug 2016 14:58:43 +0200
Subject: of: fix reference counting in of_graph_get_endpoint_by_regs

The called of_graph_get_next_endpoint() already decrements the refcount
of the prev node, so it is wrong to do it again in the calling function.

Use the for_each_endpoint_of_node() helper to iterate through the
endpoint OF nodes, which already does the right thing and simplifies
the code a bit.

Fixes: 8ccd0d0ca041 ("of: add helper for getting endpoint node of specific identifiers")
Cc: stable@vger.kernel.org
Reported-by: David Jander
Signed-off-by: Lucas Stach
Acked-by: Philipp Zabel
Signed-off-by: Rob Herring

diff --git a/drivers/of/base.c b/drivers/of/base.c
index cb255a0..3ce6953 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2342,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
         const struct device_node *parent, int port_reg, int reg)
 {
         struct of_endpoint endpoint;
-        struct device_node *node, *prev_node = NULL;
-
-        while (1) {
-                node = of_graph_get_next_endpoint(parent, prev_node);
-                of_node_put(prev_node);
-                if (!node)
-                        break;
+        struct device_node *node = NULL;
 
+        for_each_endpoint_of_node(parent, node) {
                 of_graph_parse_endpoint(node, &endpoint);
                 if (((port_reg == -1) || (endpoint.port == port_reg)) &&
                     ((reg == -1) || (endpoint.id == reg)))
                         return node;
-
-                prev_node = node;
         }
 
         return NULL;
-- cgit v0.10.2

From 88ded4d8d94a550624e1827478e13fecf97a7b0a Mon Sep 17 00:00:00 2001
From: He Kuang
Date: Thu, 4 Aug 2016 11:25:42 +0000
Subject: perf script: Show proper message when failed list scripts

Perf shows the usage message when the perf scripts folder fails to
open, which misleads users into thinking the command was mistyped.
This patch shows a proper message and guides users to check the
PERF_EXEC_PATH environment variable in that case.

Before:

  $ perf script --list
  Usage: perf script [<options>]
     or: perf script [<options>] record